blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d289d25acaf78e7bb51c689c1de4b4495a3bbd9a
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res/scripts/client/gui/scaleform/daapi/view/lobby/fortifications/fortdisabledefenceperiodwindow.py
|
d81f15c0ee94a51d408d7c2853b5cbd29a9df04e
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788
| 2015-11-18T11:33:37
| 2015-11-18T11:33:37
| 46,414,438
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 4,109
|
py
|
# 2015.11.18 11:54:00 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/FortDisableDefencePeriodWindow.py
import BigWorld
from adisp import process
from gui import SystemMessages
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortSoundController import g_fortSoundController
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortViewHelper import FortViewHelper
from gui.Scaleform.daapi.view.meta.FortDisableDefencePeriodWindowMeta import FortDisableDefencePeriodWindowMeta
from gui.Scaleform.locale.FORTIFICATIONS import FORTIFICATIONS as ALIAS, FORTIFICATIONS
from gui.Scaleform.locale.SYSTEM_MESSAGES import SYSTEM_MESSAGES
from gui.shared.formatters import text_styles
from gui.shared.fortifications.context import DefencePeriodCtx
from helpers import i18n
class FortDisableDefencePeriodWindow(FortDisableDefencePeriodWindowMeta, FortViewHelper):
    """Lobby confirmation window for disabling the fort defence period.

    The user must re-type the fort's total defence resource count into an
    "input checker" Flash component; on confirmation a DefencePeriodCtx
    request is sent to the fort provider.
    """

    def __init__(self, _ = None):
        super(FortDisableDefencePeriodWindow, self).__init__()
        # Flash input-checker component; injected in _onRegisterFlashComponent.
        self.__inputChecker = None
        # Number the user must re-type to confirm: the fort's total defence resources.
        self.__controlNumber = self.fortCtrl.getFort().getTotalDefRes()
        return

    def initInputChecker(self):
        # Configure the confirmation widget: error text, question title/body,
        # and the control number formatted as an integer.
        self.__inputChecker.errorMsg = self.__makeInputCheckerError()
        self.__inputChecker.questionTitle = self.__makeInputCheckerTitle()
        self.__inputChecker.questionBody = self.__makeInputCheckerBody()
        self.__inputChecker.setControlNumbers(self.__controlNumber, BigWorld.wg_getIntegralFormat)

    def onWindowClose(self):
        self.destroy()

    def onClickApplyButton(self):
        self.__setup()

    def onDefenceHourShutdown(self):
        # Close the window once the defence hour has actually been shut down.
        if self.fortCtrl.getFort().isDefenceHourShutDown():
            self.destroy()

    def onShutdownDowngrade(self):
        self.destroy()

    def _onRegisterFlashComponent(self, viewPy, alias):
        # Framework callback: the Flash input-checker component is now available.
        self.__inputChecker = viewPy
        self.initInputChecker()

    def _populate(self):
        super(FortDisableDefencePeriodWindow, self)._populate()
        self.startFortListening()
        # Nothing to confirm if the defence hour is already shut down.
        if self.fortCtrl.getFort().isDefenceHourShutDown():
            return self.destroy()
        self.__makeMainData()

    def _dispose(self):
        self.__inputChecker = None
        self.stopFortListening()
        super(FortDisableDefencePeriodWindow, self)._dispose()
        return

    def __makeInputCheckerError(self):
        # Error text shown when the typed number does not match.
        return text_styles.error(i18n.makeString(ALIAS.DEMOUNTBUILDING_ERRORMESSAGE))

    def __makeInputCheckerTitle(self):
        return text_styles.middleTitle(i18n.makeString(ALIAS.DISABLEDEFENCEPERIODWINDOW_INPUTCHECKER_TITLE))

    def __makeInputCheckerBody(self):
        # The control number is embedded, styled, into the localized question body.
        controlNumber = BigWorld.wg_getIntegralFormat(self.__controlNumber)
        controlNumber = text_styles.middleTitle(str(controlNumber))
        questionBody = text_styles.standard(i18n.makeString(ALIAS.DISABLEDEFENCEPERIODWINDOW_INPUTCHECKER_BODY, controlNumber=controlNumber))
        return questionBody

    def __makeMainData(self):
        # Build the window's title/body labels and push them to Flash.
        titleText = text_styles.main(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_TITLE))
        redText = text_styles.error(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_BODYREDTEXT))
        bodyText = text_styles.main(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_BODY, redText=redText))
        self.as_setDataS({'titleText': titleText,
         'bodyText': bodyText})

    @process
    def __setup(self):
        # Asynchronous (adisp) request to disable the defence period on the server.
        result = yield self.fortProvider.sendRequest(DefencePeriodCtx(waitingID='fort/settings'))
        if result:
            g_fortSoundController.playDefencePeriodDeactivated()
            SystemMessages.g_instance.pushI18nMessage(SYSTEM_MESSAGES.FORTIFICATION_DEFENCEHOURDEACTIVATED, type=SystemMessages.SM_TYPE.Warning)
            self.destroy()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\fortifications\fortdisabledefenceperiodwindow.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:54:00 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
1d6d89dd402a1014ea003cc594770dd2a2538c49
|
6203b9132af8f78c6cb12242bd223fa17d14f31e
|
/leetcode/problems/556.py
|
82b5c6ed99fea3e9b4e3d051b24c25cb28c78248
|
[] |
no_license
|
joshuap233/algorithms
|
82c608d7493b0d21989b287a2e246ef739e60443
|
dc68b883362f3ddcfb433d3d83d1bbf925bbcf02
|
refs/heads/master
| 2023-08-23T12:44:42.675137
| 2021-09-28T02:37:01
| 2021-09-28T02:37:01
| 230,285,450
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# https://leetcode-cn.com/problems/next-greater-element-iii/
# 556. 下一个更大元素 III
class Solution:
    """Same approach as computing the next lexicographic permutation."""

    # Largest signed 32-bit value; any larger result is rejected.
    MAXI = 2 ** 31 - 1

    def nextGreaterElement(self, n: int) -> int:
        """Return the smallest integer greater than n using the same digits, or -1."""
        if n <= 9:
            return -1
        digits = list(str(n))
        last = len(digits) - 1
        # Rightmost position whose digit is smaller than its successor.
        pivot = last - 1
        while pivot >= 0 and digits[pivot] >= digits[pivot + 1]:
            pivot -= 1
        if pivot < 0:
            # Digits are non-increasing: no greater arrangement exists.
            return -1
        # Rightmost digit larger than the pivot digit; swap the pair.
        swap = last
        while digits[swap] <= digits[pivot]:
            swap -= 1
        digits[pivot], digits[swap] = digits[swap], digits[pivot]
        # Reverse the suffix so the result is the smallest possible.
        digits[pivot + 1:] = digits[last:pivot:-1]
        candidate = int(''.join(digits))
        return candidate if candidate <= self.MAXI else -1
|
[
"shushugo233@gmail.com"
] |
shushugo233@gmail.com
|
99ccf909e1b7071804da551122f2a3d7c85bb020
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/192/usersdata/273/70732/submittedfiles/al6.py
|
62617a79d4eba687c0a500c294d12922ab0a48f2
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
# -*- coding: utf-8 -*-
# Primality check: count the divisors of n strictly between 1 and n.
i = 2
c = 0
n = int(input('digite o valor de n: '))
while (i < n):
    if (n % i) == 0:
        # i divides n, so n is composite; record and print the divisor.
        c = c + 1
        print(i)
    i = i + 1
# n is prime exactly when no divisor was found.  The original tested the
# loop counter i (which always ends at n) instead of the divisor count c,
# and both `if` statements were missing their colons (syntax errors);
# the final print also had an unmatched closing parenthesis.
if c == 0:
    print(' primo')
if c > 0:
    print('NAO PRIMO')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
6ec95f89ce993de65e468f212786248298f66665
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/d79b6c84068e6f5fe995a74e39cd3f63d86bb294-<parse_lldp_intf>-bug.py
|
ddbc6fc1aa3322f069ebbc9cb05db83582c1618e
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
def parse_lldp_intf(self, data):
    """Return the interface name from an 'Interface:' line in *data*, or None."""
    found = re.search('Interface:\\s*(\\S+)', data, re.M)
    return found.group(1) if found else None
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
7a63e1a4a6717671c7176bf91eda13961f655536
|
99094cc79bdbb69bb24516e473f17b385847cb3a
|
/72.Edit Distance/Solution.py
|
11b4f12f5ee723dcde3137a39b89d6242e6e0462
|
[] |
no_license
|
simonxu14/LeetCode_Simon
|
7d389bbfafd3906876a3f796195bb14db3a1aeb3
|
13f4595374f30b482c4da76e466037516ca3a420
|
refs/heads/master
| 2020-04-06T03:33:25.846686
| 2016-09-10T00:23:11
| 2016-09-10T00:23:11
| 40,810,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
__author__ = 'Simon'
class Solution(object):
    """LeetCode 72 - Edit Distance, solved with classic dynamic programming."""

    def minDistance(self, word1, word2):
        """
        :type word1: str
        :type word2: str
        :rtype: int
        """
        rows, cols = len(word1), len(word2)
        # table[i][j] = edit distance between word1[:i] and word2[:j].
        table = [[0] * (cols + 1) for _ in range(rows + 1)]
        # Transforming to/from an empty prefix costs its length.
        for j in range(cols + 1):
            table[0][j] = j
        for i in range(rows + 1):
            table[i][0] = i
        for i in range(1, rows + 1):
            for j in range(1, cols + 1):
                if word1[i - 1] == word2[j - 1]:
                    # Matching characters: no additional edit needed.
                    table[i][j] = table[i - 1][j - 1]
                else:
                    # 1 + the best of delete / insert / replace.
                    table[i][j] = 1 + min(table[i - 1][j],
                                          table[i][j - 1],
                                          table[i - 1][j - 1])
        return table[rows][cols]
|
[
"simonxu14@gmail.com"
] |
simonxu14@gmail.com
|
f41bb0f627ed6d8f5fd7b2f6953ef836320c19d9
|
9b68695d6d7d05bdfdcb087db532d66188cfbcdb
|
/bsmsm/spiders/spider.py
|
67165b7f5f3a4693d22e7d719589e6d28ffc76e2
|
[] |
no_license
|
hristo-grudev/bsmsm
|
1f100180535b564cd8ca59fd62b35de4cf25b460
|
e7035250b07e21e25299967eee065ea588369857
|
refs/heads/main
| 2023-03-13T13:13:48.075506
| 2021-03-05T08:32:08
| 2021-03-05T08:32:08
| 344,745,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
import scrapy
from scrapy.loader import ItemLoader
from ..items import BsmsmItem
from itemloaders.processors import TakeFirst
class BsmsmSpider(scrapy.Spider):
    """Spider for the news section of bsm.sm: follows every article link on
    the listing page and scrapes title, description and date from each post."""

    name = 'bsmsm'
    start_urls = ['https://www.bsm.sm/it/news-bsm.php']

    def parse(self, response):
        # Each article link on the listing page is handed to parse_post.
        links = response.xpath('//div[@class="titolo-news bold"]/a/@href').getall()
        yield from response.follow_all(links, self.parse_post)

    def parse_post(self, response):
        title = response.xpath('//h1//text()').get()
        # Collapse the description's text fragments into a single string.
        fragments = response.xpath('//span[@itemprop="description"]//text()[normalize-space()]').getall()
        description = ' '.join(fragment.strip() for fragment in fragments).strip()
        date = response.xpath('//div[@class="bold"]/text()').get()

        loader = ItemLoader(item=BsmsmItem(), response=response)
        loader.default_output_processor = TakeFirst()
        loader.add_value('title', title)
        loader.add_value('description', description)
        loader.add_value('date', date)
        return loader.load_item()
|
[
"hr.grudev@gmail.com"
] |
hr.grudev@gmail.com
|
5c77958a70db3fdb38303d8bf678113803c62984
|
d57b51ec207002e333b8655a8f5832ed143aa28c
|
/.history/gos_20200614055443.py
|
e65771e72edd5ad03a64cf83c5b0bcf4ef404048
|
[] |
no_license
|
yevheniir/python_course_2020
|
b42766c4278a08b8b79fec77e036a1b987accf51
|
a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b
|
refs/heads/master
| 2022-11-15T07:13:24.193173
| 2020-07-11T15:43:26
| 2020-07-11T15:43:26
| 278,890,802
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,414
|
py
|
# # Імпорт фажливих бібліотек
# from BeautifulSoup import BeautifulSoup
# import urllib2
# import re
# # Створення функції пошуку силок
# def getLinks(url):
# # отримання та присвоєння контенту сторінки в змінну
# html_page = urllib2.urlopen(url)
# # Перетворення контенту в обєкт бібліотеки BeautifulSoup
# soup = BeautifulSoup(html_page)
# # створення пустого масиву для лінків
# links = []
# # ЗА ДОПОМОГОЮ ЧИКЛУ ПРОХЛДИМСЯ ПО ВСІХ ЕЛЕМЕНТАХ ДЕ Є СИЛКА
# for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
# # Додаємо всі силки в список
# links.append(link.get('href'))
# # повертаємо список
# return links
# -----------------------------------------------------------------------------------------------------------
# # # Імпорт фажливих бібліотек
# import subprocess
# # Створення циклу та використання функції range для генерації послідовних чисел
# for ping in range(1,10):
# # генерування IP адреси базуючись на номері ітерації
# address = "127.0.0." + str(ping)
# # виклик функції call яка робить запит на IP адрес та запис відповіді в змінну
# res = subprocess.call(['ping', '-c', '3', address])
# # За допомогою умовних операторів перевіряємо відповідь та виводимо результат
# if res == 0:
# print "ping to", address, "OK"
# elif res == 2:
# print "no response from", address
# else:
# print "ping to", address, "failed!"
# -----------------------------------------------------------------------------------------------------------
# Import the required libraries (comment translated from Ukrainian)
import requests

# URL of the picture to download.  The original snapshot contained a
# half-typed loop header ("for pic_") that made the file a syntax error
# and left pic_url undefined; the fragment is removed and a placeholder
# URL defined so the single-download flow below is runnable.
pic_url = 'https://example.com/pic1.jpg'  # TODO: set the real picture URL

with open('pic1.jpg', 'wb') as handle:
    # Stream the image and write it to disk in 1 KiB chunks.
    response = requests.get(pic_url, stream=True)
    if not response.ok:
        print(response)
    for block in response.iter_content(1024):
        if not block:
            break
        handle.write(block)
|
[
"yevheniira@intelink-ua.com"
] |
yevheniira@intelink-ua.com
|
abe78bc49b85c74a1b2f4932b3ed2e0bab37eb16
|
ffa21e4415ead5106f7f846bc24b0d308ace90b5
|
/swagger_client/models/forecast_transaction.py
|
be10b2bc4700700721d9092cecf9dddd1c89aefa
|
[] |
no_license
|
steini58/swagger-client
|
fa7b6f077e5a1b01e42c4420b214b19e1d364e4e
|
e5fd7bf28f8529746e18bdd799c86ad78310ffd5
|
refs/heads/master
| 2020-03-29T09:14:26.644065
| 2018-09-20T13:29:14
| 2018-09-20T13:29:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,665
|
py
|
# coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.balance import Balance # noqa: F401,E501
from swagger_client.models.forecast import Forecast # noqa: F401,E501
from swagger_client.models.transaction import Transaction # noqa: F401,E501
class ForecastTransaction(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> declared Swagger type (consumed by to_dict()).
    swagger_types = {
        'forecast_balance': 'Balance',
        'account_id': 'str',
        'transactions': 'list[Transaction]'
    }

    # Maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'forecast_balance': 'forecastBalance',
        'account_id': 'accountId',
        'transactions': 'transactions'
    }

    def __init__(self, forecast_balance=None, account_id=None, transactions=None):  # noqa: E501
        """ForecastTransaction - a model defined in Swagger"""  # noqa: E501
        self._forecast_balance = None
        self._account_id = None
        self._transactions = None
        self.discriminator = None
        # Assign through the properties so each setter's None-validation
        # runs: all three fields are rejected when None.
        self.forecast_balance = forecast_balance
        self.account_id = account_id
        self.transactions = transactions

    @property
    def forecast_balance(self):
        """Gets the forecast_balance of this ForecastTransaction.  # noqa: E501

        Balance forecast  # noqa: E501

        :return: The forecast_balance of this ForecastTransaction.  # noqa: E501
        :rtype: Balance
        """
        return self._forecast_balance

    @forecast_balance.setter
    def forecast_balance(self, forecast_balance):
        """Sets the forecast_balance of this ForecastTransaction.

        Balance forecast  # noqa: E501

        :param forecast_balance: The forecast_balance of this ForecastTransaction.  # noqa: E501
        :type: Balance
        """
        if forecast_balance is None:
            raise ValueError("Invalid value for `forecast_balance`, must not be `None`")  # noqa: E501
        self._forecast_balance = forecast_balance

    @property
    def account_id(self):
        """Gets the account_id of this ForecastTransaction.  # noqa: E501

        Id of account this entry belongs to  # noqa: E501

        :return: The account_id of this ForecastTransaction.  # noqa: E501
        :rtype: str
        """
        return self._account_id

    @account_id.setter
    def account_id(self, account_id):
        """Sets the account_id of this ForecastTransaction.

        Id of account this entry belongs to  # noqa: E501

        :param account_id: The account_id of this ForecastTransaction.  # noqa: E501
        :type: str
        """
        if account_id is None:
            raise ValueError("Invalid value for `account_id`, must not be `None`")  # noqa: E501
        self._account_id = account_id

    @property
    def transactions(self):
        """Gets the transactions of this ForecastTransaction.  # noqa: E501

        List of unappliedTransaction  # noqa: E501

        :return: The transactions of this ForecastTransaction.  # noqa: E501
        :rtype: list[Transaction]
        """
        return self._transactions

    @transactions.setter
    def transactions(self, transactions):
        """Sets the transactions of this ForecastTransaction.

        List of unappliedTransaction  # noqa: E501

        :param transactions: The transactions of this ForecastTransaction.  # noqa: E501
        :type: list[Transaction]
        """
        if transactions is None:
            raise ValueError("Invalid value for `transactions`, must not be `None`")  # noqa: E501
        self._transactions = transactions

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes, recursively serialising nested
        # models (anything exposing to_dict) inside lists and dicts too.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ForecastTransaction):
            return False
        # Compares private backing fields via __dict__ (includes discriminator).
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"peter.steinberg@natur-und-genuss.de"
] |
peter.steinberg@natur-und-genuss.de
|
55d5457523106be301584f485d2044be5c180be7
|
ed32eb1eb0a328a4ffe89e178fc4987470f333cd
|
/exercise/day1-4/compute.py
|
7e64771516f775b9bee62dbd5f5d8fe460b8b9c5
|
[] |
no_license
|
xiaoyaojjian/py_learn
|
c6f5bdf31bcebf29dd914e81e6be9305a61265cc
|
95e494ea823d2074a05c1c2a49595002a1576093
|
refs/heads/master
| 2020-12-05T23:22:11.017066
| 2016-09-08T01:13:08
| 2016-09-08T01:13:08
| 67,654,055
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
"""
计算器, 用了eval() 没有任何意义了, 四则运算应该单独写一个函数处理
"""
import re
a = '1 - 2 * ( (60-30 +(-40/5) * (9-2*5/3 + 7 /3*99/4*2998 +10 * 568/14 )) - (-4*3)/ (16-3*2) )'
print(eval(a))
def get_brackets_data(formula):
return re.findall('\(([^()]+)\)', formula)
while re.search('[()]', a):
for i in get_brackets_data(a):
a = a.replace('(%s)' % i, str(eval(i)))
print(a)
print(eval(a))
|
[
"q2868765@qq.com"
] |
q2868765@qq.com
|
4577eaed8369402971817fc693acae6518a09f80
|
bd81142f05e57b637cc0ddd63edbc3c6b5b4a0a2
|
/knowledge-driven-dialogue/generative_pt/tools/conversation_client.py
|
598d22bca00ebba8dd12eac1105b2e8df08d391f
|
[
"MIT"
] |
permissive
|
Chriszhangmw/ChatBots
|
876d751f30d1d8ea759440fe1e7d4beb6ef94087
|
0735918e326bd6ff20b70388ae199ec11d9cbc11
|
refs/heads/master
| 2021-12-14T04:10:53.452552
| 2021-11-28T12:23:10
| 2021-11-28T12:23:10
| 210,681,028
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: conversation_client.py
"""
import sys
import socket
SERVER_IP = "127.0.0.1"
SERVER_PORT = 8601
def conversation_client(text):
    """
    conversation_client

    Send *text* to the conversation server over TCP and return the
    decoded reply (at most 4096 bytes).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((SERVER_IP, SERVER_PORT))
        sock.sendall(text.encode())
        reply = sock.recv(4096).decode()
    return reply
def main():
    """
    main

    Send every line of the evaluation file given as argv[1] to the
    conversation server and print each response.
    """
    if len(sys.argv) < 2:
        print("Usage: " + sys.argv[0] + " eval_file")
        # sys.exit instead of the site-injected exit(): always available,
        # and a nonzero status correctly signals the usage error.
        sys.exit(1)
    # Context manager closes the input file (the original leaked the handle).
    with open(sys.argv[1]) as eval_file:
        for line in eval_file:
            response = conversation_client(line.strip())
            print(response)


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Fixed typo in the original message ("ealier").
        print("\nExited from the program earlier!")
|
[
"zhangmw_play@163.com"
] |
zhangmw_play@163.com
|
cd7536fbdfbd4277136ae6edaee967cd1d86ab18
|
60618d48e09a140926d97b01cb9b6f76fcc65703
|
/data analysis/itheima/plot.py
|
6a18b4bded99c6dbf7247578045daa0392a6d27a
|
[] |
no_license
|
Incipe-win/Python
|
ca8f36cc8785eb13512f71a3cf10149d4e1b855e
|
5bab36b90591c74dedb6ead3484a279b90a1bcbd
|
refs/heads/master
| 2021-01-07T08:11:42.293541
| 2020-12-06T09:17:02
| 2020-12-06T09:17:02
| 241,629,236
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,102
|
py
|
from matplotlib import pyplot as plt
import random
from matplotlib import font_manager
# Font that contains CJK glyphs, needed for the Chinese labels/titles below.
my_font = font_manager.FontProperties(
    fname="/usr/share/fonts/opentype/noto/NotoSansCJK-Bold.ttc")
# import matplotlib
#
# font = {"family": "Noto Sans Mono",
# "weight": "bold",
# "size": "larger"
# }
# matplotlib.rc("font", **font)
# x = range(2, 26, 2)
# y = [15, 13, 14.5, 17, 20, 25, 26, 26, 24, 22, 18, 15]
#
# plt.figure(num="hh", figsize=(20, 8), dpi=80)
# plt.tick_params(axis='x', colors="green")
# x_ticks = [i/2 for i in range(4, 49)]
# x_labels = ["h" + str(i) for i in range(1, 14)]
# plt.xticks(x_ticks[::3], x_labels)
# plt.yticks(range(min(y), max(y) + 1))
#
# plt.plot(x, y)
# plt.savefig("./test.svg")
# plt.show()
# y = [random.randint(20, 35) for i in range(120)]
# cnt = 10
# x = []
# for i in range(120):
# if i == 60:
# cnt += 1
# i %= 60
# s = str(i) if i >= 10 else "0" + str(i)
# x.append(str(cnt) + ":" + s)
# plt.figure(figsize=(100, 15), dpi=80)
# plt.tick_params(axis='both', colors="green")
# plt.xticks(list(range(120))[::3], labels=x[::3], rotation=45,
# fontproperties=my_font)
# plt.yticks(range(19, 36))
# plt.xlabel("时间", fontproperties=my_font)
# plt.ylabel("温度 单位(摄氏度)", fontproperties=my_font)
# plt.title("10~12点每分钟气温变化情况", fontproperties=my_font)
# plt.plot(x, y)
# plt.show()
# Two data series plotted against ages 11..30 (x labels are "<age>岁").
y1 = [1, 0, 1, 1, 2, 4, 3, 2, 3, 4, 4, 5, 6, 5, 4, 3, 3, 1, 1, 1]
y2 = [1, 0, 3, 1, 2, 2, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]
x = ["{}岁".format(i) for i in range(11, 31)]
plt.figure(figsize=(20, 15), dpi=80)
plt.tick_params(axis="both", colors="green")
# One tick per data point, rotated so the CJK labels do not overlap.
plt.xticks(list(range(20)), labels=x, rotation=45, fontproperties=my_font)
# NOTE(review): range(min(y1), max(y1)) excludes max(y1) itself, so the top
# y tick is missing — presumably max(y1) + 1 was intended; confirm before changing.
plt.yticks(range(min(y1), max(y1)))
plt.xlabel("xx", fontproperties=my_font)
plt.ylabel("xxxx 单位(个)", fontproperties=my_font)
plt.title("xx~xxxx", fontproperties=my_font)
plt.grid(alpha=0.4)
plt.plot(x, y1, color="green", label="xx")
plt.plot(x, y2, color="blue", label="xx")
plt.legend(prop=my_font)
# plt.show()
plt.savefig("./plot.svg")
|
[
"whc_9_13@163.com"
] |
whc_9_13@163.com
|
40d6ad7c3c49e310f10e435aee22d2aa9b19a03c
|
68eb441faf3f9415fbcbc8330f9b01ad6933bede
|
/ebook/machinelearningdemo/MachineLearningLessonPro/ML_1/3.0loadfile_fromdata.py
|
1711ef93a3ae8eea6d78e080a3ca39a2781775f4
|
[] |
no_license
|
OrriO/jupyter_myworkspace
|
fb8e97865f15abe2fb3aa01985fdb4f34317f15f
|
a592ab92f38a1cd466c454bb36fd0002c75202a9
|
refs/heads/master
| 2023-06-01T02:00:36.986439
| 2021-07-08T13:44:26
| 2021-07-08T13:44:26
| 381,997,768
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
#-*- coding: utf-8 -*-
# @Time : 2018/12/6 10:08
# @Author : Z
# @Email : S
# @File : 3.0loadfile_fromdata.py
from surprise import BaselineOnly
from surprise import Dataset
from surprise import Reader
from surprise.model_selection import cross_validate
import os
# Path to the ratings file (MovieLens-100k style, tab-separated lines).
file_path = os.path.expanduser('./u.data')
# As we're loading a custom dataset, we need to define a reader. In the
# movielens-100k dataset, each line has the following format:
# 'user item rating timestamp', separated by '\t' characters.
reader = Reader(line_format='user item rating timestamp', sep='\t')
data = Dataset.load_from_file(file_path, reader=reader)
# We can now use this dataset as we please, e.g. calling cross_validate,
# which fits the BaselineOnly algorithm and prints cross-validation scores.
cross_validate(BaselineOnly(), data, verbose=True)
|
[
"guojj1@guahao.com"
] |
guojj1@guahao.com
|
ed6019a55cbe49b15e4cbe71343c9ea879f3e984
|
bd14c979335112b7718b0feda18ebf0e3b40fe5c
|
/arihon_biginners/review_of_binsearch.py
|
3ae4f4958eb3c9ebc42c27e83fb5e6cc36c26f9e
|
[] |
no_license
|
ababa831/atcoder_beginners
|
22c57b15333d110126d1b1afadc0ff5e8784fc4f
|
1a30882ce7f20f312045d5dc7bfaa5688cc8a88e
|
refs/heads/master
| 2023-03-07T15:47:19.750682
| 2020-03-04T19:53:45
| 2020-03-04T19:53:45
| 143,360,607
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
# Review of a simple bit-mask subset-enumeration algorithm: every mask in
# [0, 2**D) encodes one combination of the digits 1..D.
D = 3
print('Number of digit', D)
combinations = []
for mask in range(1 << D):
    # 1-based digits whose bit is set in this mask.
    flagged = [bit + 1 for bit in range(D) if (mask >> bit) & 1]
    print('Binary {} has flags at digit {}'.format(bin(mask), flagged))
    combinations.append(flagged)
print('Total number of combinations ', len(combinations))
print('Combinations: ', combinations)
|
[
"flvonlineconverter@gmail.com"
] |
flvonlineconverter@gmail.com
|
195b5b5bf3d61d63758c2c4cdb7d1942a70e832d
|
3f5d531abcf69bc9f7de317ce46d45786272013d
|
/src/config/test/test_default.py
|
7711ddcd42e45b5fc7232a940a9bceb55d370e5a
|
[
"MIT"
] |
permissive
|
thak123/i-tagger
|
61a8880e250069fc40c0a616e718a739bd27cb58
|
dd8502947011e95b72b243fad9aad094b9a7d15c
|
refs/heads/master
| 2021-05-14T16:51:20.799677
| 2018-01-02T12:09:36
| 2018-01-02T12:09:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
import unittest
from helpers.config_helper import *
from config.default import create_default_config
class TestDefault(unittest.TestCase):
    """Checks that a default config file is created and readable."""

    def test_creation(self):
        manager = ConfigManager("/tmp/config.ini")
        # Generate the default file only when it is not already present.
        if not os.path.exists("/tmp/config.ini"):
            create_default_config(manager)
        self.assertEqual(os.path.exists("/tmp/config.ini"), True)
        self.assertEqual(manager.get_item("Schema", "text_column"), "word")
|
[
"mageswaran1989@gmail.com"
] |
mageswaran1989@gmail.com
|
17af632bafeab7fe05ec6df418b301f86f74b0cb
|
582df95fc9b1d00e6c75321ad6a7894e0722245e
|
/tests/test_download_model.py
|
34baadec66352e161086017e45cd3ea66aadfa94
|
[
"Apache-2.0"
] |
permissive
|
viniarck/podcaststore-django
|
2c4db217126e3dbdf1244bb22ae1aea0cd502874
|
90316ffb18793b089291a0e28ac3ee2bb5e458cb
|
refs/heads/master
| 2020-06-29T13:16:18.449358
| 2019-12-05T11:44:34
| 2019-12-05T11:44:34
| 200,547,759
| 0
| 0
|
Apache-2.0
| 2020-06-05T22:33:18
| 2019-08-04T22:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 828
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from podcaststore_api.models.download import Download, DownloadSerializer
class TestDownloadModel:
    """TestDownloadModel."""

    @pytest.mark.django_db
    def test_repr(self, create_download: Download) -> None:
        """Test __repr__ method."""
        download = create_download
        expected = f"Download({download.id}, {download.track_id}, {download.date})"
        assert repr(download) == expected
class TestTagSerializer:
    """TestTagSerializer"""

    @pytest.mark.django_db
    def test_ser_data(self, create_download: Download) -> None:
        """Test serialization data."""
        serialized = DownloadSerializer(create_download)
        # All public fields must appear in the serialized payload.
        for field in ("id", "track_id", "date"):
            assert field in serialized.data
|
[
"viniarck@gmail.com"
] |
viniarck@gmail.com
|
a161c21ea948b07a05375c924672731065a639c1
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/detection/YOLOX_ID2833_for_PyTorch/tests/test_models/test_dense_heads/test_yolact_head.py
|
11b74a3b9a7c7d2bae8547cf62e2ad4fdb73cec3
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,894
|
py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
def test_yolact_head_loss():
    """Tests yolact head losses when truth is empty and non-empty.

    Builds a YOLACT bbox head, segmentation head and protonet mask head for
    a single 550x550 image, then checks that:
      * with empty ground truth the cls loss is nonzero while box, segm and
        mask losses are all zero;
      * with one ground-truth box all four losses are nonzero.
    """
    s = 550
    img_metas = [{
        'img_shape': (s, s, 3),
        'scale_factor': 1,
        'pad_shape': (s, s, 3)
    }]
    train_cfg = mmcv.Config(
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.,
                ignore_iof_thr=-1,
                gt_max_assign_all=False),
            smoothl1_beta=1.,
            allowed_border=-1,
            pos_weight=-1,
            neg_pos_ratio=3,
            debug=False,
            min_gt_box_wh=[4.0, 4.0]))
    bbox_head = YOLACTHead(
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=3,
            scales_per_octave=1,
            base_sizes=[8, 16, 32, 64, 128],
            ratios=[0.5, 1.0, 2.0],
            strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
            centers=[(550 * 0.5 / x, 550 * 0.5 / x)
                     for x in [69, 35, 18, 9, 5]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
        num_head_convs=1,
        num_protos=32,
        use_ohem=True,
        train_cfg=train_cfg)
    segm_head = YOLACTSegmHead(
        in_channels=256,
        num_classes=80,
        loss_segm=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    mask_head = YOLACTProtonet(
        num_classes=80,
        in_channels=256,
        num_protos=32,
        max_masks_to_train=100,
        loss_mask_weight=6.125)
    # One random feature map per pyramid level; spatial sizes mirror the
    # anchor-generator stride denominators above.
    feat = [
        torch.rand(1, 256, feat_size, feat_size)
        for feat_size in [69, 35, 18, 9, 5]
    ]
    cls_score, bbox_pred, coeff_pred = bbox_head.forward(feat)
    # Test that empty ground truth encourages the network to predict background
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_masks = [torch.empty((0, 550, 550))]
    gt_bboxes_ignore = None
    empty_gt_losses, sampling_results = bbox_head.loss(
        cls_score,
        bbox_pred,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no box loss.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert empty_box_loss.item() == 0, (
        'there should be no box loss when there are no true boxes')
    # Test segm head and mask head
    segm_head_outs = segm_head(feat[0])
    empty_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
                          sampling_results)
    empty_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas,
                                     sampling_results)
    # When there is no truth, the segm and mask loss should be zero.
    empty_segm_loss = sum(empty_segm_loss['loss_segm'])
    empty_mask_loss = sum(empty_mask_loss['loss_mask'])
    assert empty_segm_loss.item() == 0, (
        'there should be no segm loss when there are no true boxes')
    assert empty_mask_loss == 0, (
        'there should be no mask loss when there are no true boxes')
    # When truth is non-empty then cls, box, mask, segm loss should be
    # nonzero for random inputs.
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
    ]
    gt_labels = [torch.LongTensor([2])]
    gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
    one_gt_losses, sampling_results = bbox_head.loss(
        cls_score,
        bbox_pred,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=gt_bboxes_ignore)
    one_gt_cls_loss = sum(one_gt_losses['loss_cls'])
    one_gt_box_loss = sum(one_gt_losses['loss_bbox'])
    assert one_gt_cls_loss.item() > 0, 'cls loss should be non-zero'
    assert one_gt_box_loss.item() > 0, 'box loss should be non-zero'
    one_gt_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
    mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
                          sampling_results)
    one_gt_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes,
                                      img_metas, sampling_results)
    one_gt_segm_loss = sum(one_gt_segm_loss['loss_segm'])
    one_gt_mask_loss = sum(one_gt_mask_loss['loss_mask'])
    assert one_gt_segm_loss.item() > 0, 'segm loss should be non-zero'
    assert one_gt_mask_loss.item() > 0, 'mask loss should be non-zero'
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
78a9d21635b3b2f9d4282deb74507c8b86a89628
|
ea2015881c18583a4295122f2e2c1d2dbd3e32f9
|
/_pipeline_scripts/script_6.4.3_ps_prot_pairDave.py
|
6b12ef0ebf30ba4e369c2c941843af7dcdf42b21
|
[] |
no_license
|
panchyni/PseudogenePipeline
|
ad0b210d943bfdc83da1eeb63c0d7dec2a8719ae
|
44a5bfd034dfd9b21808b6e6c5b789f141912c33
|
refs/heads/master
| 2021-01-11T15:54:57.514872
| 2017-04-17T21:13:16
| 2017-04-17T21:13:16
| 79,955,253
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
# Python 2 script: expand (pseudogene, gene) pairs into per-protein pairs.
# NOTE(review): original indentation was lost in transit; restored to the
# only structure consistent with the logic.
import sys

# Pass 1: map gene ID -> list of protein suffixes, scanned from FASTA
# headers of the form ">gene.suffix" in the protein sequence file.
print "Read protein sequence file..."
inp = open(sys.argv[1])
inl = inp.readlines()
p = {}
for i in inl:
    if i[0] == ">":
        # Strip ">" and the trailing newline, then split gene from suffix.
        g = i[1:-1].split(".")
        if g[0] not in p:
            p[g[0]] = [g[1]]
        else:
            p[g[0]].append(g[1])

# Pass 2: for each tab-separated (pseudogene, gene) pair, emit one output
# line per protein isoform of that gene.
print "Read pair file..."
inp = open(sys.argv[2]) # osv5_ps_gene.pairs
oup = open("osv5_ps_prot.pairs","w")
inl = inp.readlines()
miss = []
for i in inl:
    L = i[:-1].split("\t")
    if L[1] in p:
        for j in p[L[1]]:
            oup.write("%s\t%s.%s\n" % (L[0],L[1],j))
    else:
        # Remember genes absent from the protein file so each is reported once.
        if L[1] not in miss:
            miss.append(L[1])
print "The following genes are not in the prot seq file:"
for i in miss:
    print "",i
print "Done!"
|
[
"panchyni.msu.edu"
] |
panchyni.msu.edu
|
a07caa95edb7398b9588e8dbf134ba5d00978be0
|
977073b97242b8bf48b49e145395d8d948890924
|
/experiments/run_submission.py
|
0aaa1722561252ba0e1393e56e7ad046f830a6f5
|
[] |
no_license
|
upura/booking-challenge-2021
|
c80e88f8545ae1b5b8e3d9da3bac49f3ea982ee5
|
7b6daa2fabd28773cc452cd6605861372ea64d78
|
refs/heads/master
| 2023-03-03T16:22:45.258906
| 2021-02-17T20:36:06
| 2021-02-17T20:36:06
| 325,207,679
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,768
|
py
|
import gc
import numpy as np
import pandas as pd
from sklearn import preprocessing
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from src.datasets import load_train_test, BookingDataset
from src.models import BookingNN
from src.utils import seed_everything
from src.runner import CustomRunner
if __name__ == '__main__':
    # Fix all RNG seeds for reproducible inference.
    seed_everything(0)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)

    # Categorical features fed to the embedding layers; the commented-out
    # columns were tried and dropped.
    categorical_cols = [
        'user_id',
        # 'device_class',
        # 'affiliate_id',
        'booker_country',
        # 'hotel_country'
    ]

    train_test = load_train_test()
    cat_dims = [int(train_test[col].nunique()) for col in categorical_cols]
    # NOTE(review): emb_dims is computed but never used below — presumably
    # BookingNN derives its own embedding sizes; confirm before deleting.
    emb_dims = [(x, min(50, (x + 1) // 2)) for x in cat_dims]

    # Label-encode the prediction target (city_id) and every categorical.
    target_le = preprocessing.LabelEncoder()
    train_test['city_id'] = target_le.fit_transform(train_test['city_id'])
    for c in categorical_cols:
        le = preprocessing.LabelEncoder()
        train_test[c] = le.fit_transform(train_test[c].astype(str).fillna('unk').values)

    # Test rows are those with a row_num; collapse consecutive duplicate
    # cities within a trip and drop the masked (0) city entries.
    test = train_test[~train_test['row_num'].isnull()]
    test_trips = test[test['city_id'] != test['city_id'].shift(1)].query('city_id!=0').groupby('utrip_id')['city_id'].apply(lambda x: x.values).reset_index()
    X_test = test[test['city_id'] != test['city_id'].shift(1)].query('city_id!=0').groupby('utrip_id')[categorical_cols].last().reset_index()
    X_test['city_id'] = test_trips['city_id']
    X_test = X_test.reset_index(drop=True)
    test_dataset = BookingDataset(X_test, is_train=False)
    # batch_size=1: each trip is a variable-length sequence.
    test_loader = DataLoader(test_dataset,
                             shuffle=False,
                             batch_size=1)
    # Free the large frames before loading model weights.
    del train_test, test, test_trips
    gc.collect()

    model_paths = [
        '../input/booking-bi-lstm-ep1/logdir_nn000',
    ]
    for mp in model_paths:
        for fold_id in (0,):
            runner = CustomRunner(device=device)
            model = BookingNN(len(target_le.classes_))
            pred = []
            for prediction in tqdm(runner.predict_loader(loader=test_loader,
                                                         resume=f'{mp}/fold{fold_id}/checkpoints/best.pth',
                                                         model=model,)):
                # Keep the 4 highest-scoring cities for the last timestep,
                # mapped back to original city ids.
                pred.append(target_le.inverse_transform(np.argsort(prediction.cpu().numpy()[-1, :])[-4:]))
            pred = np.array(pred)
            np.save(f"y_pred{mp.replace('/', '_').replace('.', '')}_fold{fold_id}", pred)

    # Assemble the competition submission: one row per trip, 4 candidates.
    submission = pd.concat([
        X_test['utrip_id'],
        pd.DataFrame(pred, columns=['city_id_1', 'city_id_2', 'city_id_3', 'city_id_4'])
    ], axis=1)
    print(submission.head())
    submission.to_csv('submission.csv', index=False)
|
[
"upura0@gmail.com"
] |
upura0@gmail.com
|
263f9d74b0c56b54ae61b705fc78e35537aa37aa
|
1bdf38834c22b0100595cb22f2862fd1ba0bc1e7
|
/code394DecodeString.py
|
6498e6f8c2f6d46d2cadc4e51089b069f52ef7bd
|
[] |
no_license
|
cybelewang/leetcode-python
|
48d91c728856ff577f1ccba5a5340485414d6c6e
|
635af6e22aa8eef8e7920a585d43a45a891a8157
|
refs/heads/master
| 2023-01-04T11:28:19.757123
| 2020-10-29T05:55:35
| 2020-10-29T05:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
"""
394 Decode String
Given an encoded string, return it's decoded string.
The encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.
You may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.
Furthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there won't be input like 3a or 2[4].
Examples:
s = "3[a]2[bc]", return "aaabcbc".
s = "3[a2[c]]", return "accaccacc".
s = "2[abc]3[cd]ef", return "abcabccdcdcdef".
"""
class Solution:
    def decodeString(self, s):
        """Decode a k[substr] encoded string with an explicit frame stack.

        Each '[' pushes the (string built so far, pending repeat count)
        frame; each ']' pops it and splices in the repeated fragment.
        """
        frames = []
        repeat = 0
        current = ''
        for ch in s:
            if ch == '[':
                frames.append((current, repeat))
                current, repeat = '', 0
            elif ch == ']':
                prev, count = frames.pop()
                current = prev + count * current
            elif ch.isdigit():
                repeat = repeat * 10 + int(ch)
            else:
                current += ch
        return current

    def decodeString2(self, s):
        """Alternative decoder keeping strings and counts on one stack.

        The stack alternates partial strings and repeat counts; the final
        decoded text accumulates in the last slot.
        """
        pieces, number = [''], 0
        for ch in s:
            if ch.isdigit():
                number = number * 10 + (ord(ch) - ord('0'))
            elif ch == '[':
                pieces.extend([number, ''])
                number = 0
            elif ch == ']':
                fragment = pieces.pop()
                times = pieces.pop()
                pieces[-1] += fragment * times
                number = 0
            else:
                pieces[-1] += ch
                number = 0
        return pieces[-1]
# Quick manual smoke test: decode a few sample inputs and print the results.
obj = Solution()
test_cases = ['', 'abcde', '3[a]2[bc]', '3[a2[c]]', '2[abc]3[cd]ef']
for case in test_cases:
    print(obj.decodeString(case))
|
[
"guoligit@gmail.com"
] |
guoligit@gmail.com
|
2542635ffe3127c2fbac935e327705fd7fcb674b
|
cc7bbdbb22cb6f7e7916388a5ee8218bc8ffa158
|
/Python3/Tornado/apps/pg/PG_Client/clientadmin/utils.py
|
971af7cb5a0d2998e2e927d20769b4a5cd027213
|
[
"MIT"
] |
permissive
|
youngqqcn/QBlockChainNotes
|
a816e067642f48a6da38b624663254b4016ec496
|
c9c143eaba6c06e3cee866669ec286e4d3cdbba8
|
refs/heads/master
| 2023-04-03T23:31:05.585545
| 2023-03-30T09:29:07
| 2023-03-30T09:29:07
| 155,657,459
| 37
| 15
|
MIT
| 2023-03-06T23:09:32
| 2018-11-01T03:33:11
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
def jwt_response_payload_handler(token, user=None, request=None):
    """Build the custom response payload for a successful JWT authentication.

    :param token: the issued JWT
    :param user: the user object that just authenticated
    :param request: the request the client submitted

    When ``user.username`` is set, the account still requires a (Google)
    verification code, so a "fail" payload carrying the code hint is
    returned instead of the token.
    """
    # NOTE(review): both branches answer with "status": 200 — clients are
    # expected to distinguish outcomes via the "code" field.
    # Fix: compare against None with "is not" (PEP 8), not "!=".
    if user.username is not None:
        return {
            'code': "fail",
            "status": 200,
            "data": {
                "gcode": user.username,
                "detail": "请输入验证码,重新登录!",
            }
        }

    return {
        'code': "success",
        "status": 200,
        "data": {
            'token': token,
            'pro_id': user.pro_id,
            'username': user.pro_name,
            'email': user.email,
            'tel_no': user.tel_no,
            "detail": "登录成功!",
        }
    }
def jwt_response_payload_error_handler(request=None):
    """Payload returned when login fails because of bad account credentials."""
    payload = {
        "code": "fail",
        "status": 400,
        "data": {
            "detail": "登录失败! 请检查账号信息是否正确,重新登录! ",
        },
    }
    return payload
def jwt_response_payload_code_error_handler(request=None):
    """Payload returned when the Google verification code is wrong."""
    payload = {
        "code": "fail",
        "status": 400,
        "data": {
            "detail": "登录失败! 请检查谷歌验证码是否正确,重新登录! ",
        },
    }
    return payload
def jwt_response_payload_frequently_error_handler(request=None):
    """Payload returned when logins are attempted too frequently."""
    payload = {
        "code": "fail",
        "status": 400,
        "data": {
            "detail": "登录失败! 登录频繁! ",
        },
    }
    return payload
|
[
"youngqqcn@163.com"
] |
youngqqcn@163.com
|
616481b2e75063bd42b700b4baac1bdbbd6f92b1
|
1804187f39dd6004250933b35ba9ce24297f32a5
|
/car_importclass.py
|
860b39b3d9c08872ea8be65c07d26f6029ef9c66
|
[] |
no_license
|
xiaomengxiangjia/Python
|
ecd2e3e8576364f15482669cb75b52b8790543f5
|
7f52a33d7956068d26347cf34d35c953b945a635
|
refs/heads/master
| 2020-03-20T23:01:09.981928
| 2018-08-23T09:04:53
| 2018-08-27T05:46:38
| 137,825,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,242
|
py
|
"""一个可用于表示汽车的类"""
class Car():
    """A simple model of a car."""

    def __init__(self, make, model, year):
        """Store the car's identity and start the odometer at zero."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a neatly formatted descriptive name."""
        return ' '.join([str(self.year), self.make, self.model]).title()

    def read_odometer(self):
        """Print a message stating the car's mileage."""
        print("This car has " + str(self.odometer_reading) + " miles on it.")

    def update_odometer(self, mileage):
        """Set the odometer reading, refusing to roll it back."""
        if mileage < self.odometer_reading:
            print("You can't roll back an odometer!")
        else:
            self.odometer_reading = mileage

    def increment_odometer(self, miles):
        """Add the given distance to the odometer reading."""
        self.odometer_reading = self.odometer_reading + miles
"""一组用于表示电动汽车的类"""
class Battery():
    """A simple model of an electric-car battery."""

    def __init__(self, battery_size=60):
        """Record the battery capacity in kWh (default 60)."""
        self.battery_size = battery_size

    def describe_battery(self):
        """Print a message describing the battery capacity."""
        print("This car has a " + str(self.battery_size) + "-kwh battery.")

    def get_range(self):
        """Print the approximate range this battery provides."""
        # Known capacity -> range pairs; anything else falls back to 200.
        known_ranges = {70: 240, 85: 270}
        range = known_ranges.get(self.battery_size, 200)
        message = ("This car can go approximately " + str(range)
                   + " miles on a full charge.")
        print(message)
class ElectricCar(Car):
    """Model the aspects of a car that are specific to electric vehicles."""
    def __init__(self, make, model, year):
        """
        Initialise the parent Car's attributes, then attach a default Battery.
        """
        super().__init__(make, model, year)
        self.battery = Battery()
|
[
"645334483@qq.com"
] |
645334483@qq.com
|
276bccd4f16fb7b435ac61d0da296658d2a152fd
|
97ae427ff84c9b0450ed709dc55e1cc0e1edc096
|
/til/future_til/class_level_operators.py
|
02723ea43b703bfd62523ad8737ad110b21d2a4e
|
[] |
no_license
|
OaklandPeters/til
|
9081ac8b968223f4c92b38cf20cda90c92966628
|
12a1f7623916709211686d7817b93c7ef4d532d2
|
refs/heads/master
| 2021-01-17T14:16:48.285244
| 2016-06-20T14:23:40
| 2016-06-20T14:23:40
| 51,449,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,586
|
py
|
"""
This has not been made into a TIL in til/python yet, because...
it does not work correctly atm.
However, I'm reasonably sure I can get it to work (since I've got type-level operatores to work in the past)
"""
#
# Class-level operators
#--------------------------
# Requires metaclasses
# To make this work with instance-level overrides is complicated
# ... I should look to the proper method lookup, as described here:
# https://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/
#
# ... actually, I'm pretty sure I need to use something like my @pedanticmethod
# to make __mul__ work as both a classmethod and instancemethod
class OperatorMeta(type):
    """Metaclass forwarding class-level ``cls * other`` to the class's __mul__."""
    def __mul__(cls, other):
        # hasattr(cls, '__mul__') is effectively always true here (this very
        # metaclass defines __mul__), so the first branch is always taken.
        if hasattr(cls, '__mul__'):
            return cls.__mul__(other)
        else:
            return type.__mul__(cls, other)
        # NOTE(review): unreachable dead code — both branches above return,
        # so this TypeError can never be raised (the message also contains
        # the typo "unspported").
        raise TypeError(str.format(
            "unspported operand type(s) for *: '{0}' and '{1}'",
            cls.__name__, type(other).__name__
        ))
class ThreeBase(metaclass=OperatorMeta):
    """Demo class whose class-level ``* value`` multiplies ``base`` by value."""
    # Class-level default; instances shadow it via __init__, but the
    # classmethod __mul__ below never looks at instance state.
    base = 3
    @classmethod
    def __mul__(cls, value):
        # Always reads the *class* attribute (3), even when invoked through
        # an instance — the root cause of the failure demonstrated below.
        return cls.base * value
    def __init__(self, base):
        self.base = base
assert((ThreeBase * 5) == 15)
# NOTE(review): the next assertion actually raises AssertionError at import
# time — the classmethod __mul__ reads the class attribute base (3), not the
# instance's 10, so ThreeBase(10) * 5 evaluates to 15, exactly as the
# author's trailing comment admits.
assert((ThreeBase(10) * 5) == 50 ) # WRONG. Still returns 15

# This does not work correctly, the problem being I forget how
# to make OperatorMeta.__mul__ proxy down to the instance level
# ... HOWEVER, if I look up the standard rules for method lookup,
# in relation to metaclasses (the standard metaclass being 'type')
# then that should show me what to do
|
[
"oakland.peters@gmail.com"
] |
oakland.peters@gmail.com
|
b0e3a882a9cb2bf2f6a1e29d61545ed83bc64a05
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02882/s441562654.py
|
2251e4e406faa3b13c7d32923f7711a41c800a0e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
# Fix: hoist the mid-script "import math" to the top of the block (PEP 8);
# behaviour is unchanged.
import math

# Read a square tank's base side a, height b, and water volume x (cm^3),
# then print the maximum tilt angle (degrees) before water spills.
a, b, x = map(int, input().split())
if x > (a**2)*b/2:
    # More than half full: the water surface still cuts the opposite wall,
    # so the empty part is a triangular prism of volume a^3 * t / 2.
    t = 2*((a**2)*b-x)/(a**3)
else:
    # Half full or less: the water itself forms a triangular prism.
    t = a*(b**2)/(2*x)
# t is tan(theta); convert to degrees.
ans = math.degrees(math.atan(t))
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7eae7b743e1fdb51757eab7546ee206614610ba1
|
43b4cabe8b711d9eb6988a17d0914cf95ac1c5a1
|
/Lesson-2/7_BookmarkServer/BookmarkServer.py
|
b42fd8ceecfd9c42216f26e2953ac331e00dca63
|
[] |
no_license
|
fatih-iver/course-ud303
|
c9aae321336c8e0b3ed1e671338cc993d04dc34b
|
64d2107891cc24d303dffb98216a72505eeeb217
|
refs/heads/master
| 2020-03-24T21:57:30.923020
| 2018-08-04T11:50:20
| 2018-08-04T11:50:20
| 143,059,407
| 0
| 0
| null | 2018-07-31T19:40:50
| 2018-07-31T19:40:49
| null |
UTF-8
|
Python
| false
| false
| 5,827
|
py
|
#!/usr/bin/env python3
#
# A *bookmark server* or URI shortener that maintains a mapping (dictionary)
# between short names and long URIs, checking that each new URI added to the
# mapping actually works (i.e. returns a 200 OK).
#
# This server is intended to serve three kinds of requests:
#
# * A GET request to the / (root) path. The server returns a form allowing
# the user to submit a new name/URI pairing. The form also includes a
# listing of all the known pairings.
# * A POST request containing "longuri" and "shortname" fields. The server
# checks that the URI is valid (by requesting it), and if so, stores the
# mapping from shortname to longuri in its dictionary. The server then
# redirects back to the root path.
# * A GET request whose path contains a short name. The server looks up
# that short name in its dictionary and redirects to the corresponding
# long URI.
#
# Your job in this exercise is to finish the server code.
#
# Here are the steps you need to complete:
#
# 1. Write the CheckURI function, which takes a URI and returns True if a
# request to that URI returns a 200 OK, and False otherwise.
#
# 2. Write the code inside do_GET that sends a 303 redirect to a known name.
#
# 3. Write the code inside do_POST that sends a 400 error if the form fields
# are missing.
#
# 4. Write the code inside do_POST that sends a 303 redirect to the form
# after saving a newly submitted URI.
#
# 5. Write the code inside do_POST that sends a 404 error if a URI is not
# successfully checked (i.e. if CheckURI returns false).
#
# In each step, you'll need to delete a line of code that raises the
# NotImplementedError exception. These are there as placeholders in the
# starter code.
#
# After writing each step, restart the server and run test.py to test it.
import http.server
import requests
from urllib.parse import unquote, parse_qs
memory = {}
form = '''<!DOCTYPE html>
<title>Bookmark Server</title>
<form method="POST">
<label>Long URI:
<input name="longuri">
</label>
<br>
<label>Short name:
<input name="shortname">
</label>
<br>
<button type="submit">Save it!</button>
</form>
<p>URIs I know about:
<pre>
{}
</pre>
'''
def CheckURI(uri, timeout=5):
    '''Check whether this URI is reachable, i.e. does it return a 200 OK?

    Returns True when a GET request to uri answers 200 OK, and False on any
    other status code, any request error, or a timeout.
    '''
    try:
        response = requests.get(uri, timeout=timeout)
    except requests.RequestException:
        # Connection failure, bad URL, timeout, ... -> treat as unreachable.
        return False
    return response.status_code == 200
class Shortener(http.server.BaseHTTPRequestHandler):
    """Request handler for the bookmark server.

    GET /         -> serve the submission form plus all known pairings.
    GET /<name>   -> 303 redirect to the stored long URI, or 404.
    POST /        -> validate and store a new shortname/longuri pairing.
    """

    def do_GET(self):
        # A GET request will either be for / (the root path) or for /some-name.
        # Strip off the / and we have either empty string or a name.
        name = unquote(self.path[1:])
        if name:
            if name in memory:
                # 2. Send a 303 redirect to the long URI in memory[name].
                self.send_response(303)
                longuri = memory[name]
                self.send_header('Location', longuri)
                self.end_headers()
            else:
                # We don't know that name! Send a 404 error.
                self.send_response(404)
                self.send_header('Content-type', 'text/plain; charset=utf-8')
                self.end_headers()
                self.wfile.write("I don't know '{}'.".format(name).encode())
        else:
            # Root path. Send the form.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            # List the known associations in the form.
            known = "\n".join("{} : {}".format(key, memory[key])
                              for key in sorted(memory.keys()))
            self.wfile.write(form.format(known).encode())

    def do_POST(self):
        # Decode the form data (urlencoded body of Content-length bytes).
        length = int(self.headers.get('Content-length', 0))
        body = self.rfile.read(length).decode()
        params = parse_qs(body)
        # Check that the user submitted the form fields.
        if "longuri" not in params or "shortname" not in params:
            # 3. Serve a 400 error with a useful message.
            self.send_response(400)
            self.send_header('Content-type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write("Missing form fields!".encode())
            return
        # parse_qs maps each field to a list; take the first value.
        longuri = params["longuri"][0]
        shortname = params["shortname"][0]
        if CheckURI(longuri):
            # This URI is good! Remember it under the specified name.
            memory[shortname] = longuri
            # 4. Serve a redirect to the root page (the form).
            self.send_response(303)
            self.send_header('Location', '/')
            self.end_headers()
        else:
            # Didn't successfully fetch the long URI.
            # 5. Send a 404 error with a useful message.
            self.send_response(404)
            self.send_header('Content-type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write("Couldn't fetch URI '{}'. Sorry!".format(longuri).encode())
if __name__ == '__main__':
    # BUGFIX: "os" was used below but never imported anywhere in the file,
    # so the server crashed with NameError at startup.
    import os
    port = int(os.environ.get('PORT', 8000))  # Use PORT if it's there.
    server_address = ('', port)
    httpd = http.server.HTTPServer(server_address, Shortener)
    httpd.serve_forever()
|
[
"noreply@github.com"
] |
fatih-iver.noreply@github.com
|
759f2892a4b03efd81ece2f4d33a6eba2ba16139
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/OpenGL/raw/GLX/MESA/query_renderer.py
|
072891b41d9ef525470951c92ec96a668f34048f
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874
| 2023-07-22T21:48:01
| 2023-07-22T21:48:01
| 188,486,371
| 42
| 110
|
MIT
| 2022-11-20T09:47:56
| 2019-05-24T20:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,034
|
py
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_MESA_query_renderer'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_MESA_query_renderer',error_checker=_errors._error_checker)
GLX_RENDERER_ACCELERATED_MESA=_C('GLX_RENDERER_ACCELERATED_MESA',0x8186)
GLX_RENDERER_DEVICE_ID_MESA=_C('GLX_RENDERER_DEVICE_ID_MESA',0x8184)
GLX_RENDERER_OPENGL_COMPATIBILITY_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_COMPATIBILITY_PROFILE_VERSION_MESA',0x818B)
GLX_RENDERER_OPENGL_CORE_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_CORE_PROFILE_VERSION_MESA',0x818A)
GLX_RENDERER_OPENGL_ES2_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_ES2_PROFILE_VERSION_MESA',0x818D)
GLX_RENDERER_OPENGL_ES_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_ES_PROFILE_VERSION_MESA',0x818C)
GLX_RENDERER_PREFERRED_PROFILE_MESA=_C('GLX_RENDERER_PREFERRED_PROFILE_MESA',0x8189)
GLX_RENDERER_UNIFIED_MEMORY_ARCHITECTURE_MESA=_C('GLX_RENDERER_UNIFIED_MEMORY_ARCHITECTURE_MESA',0x8188)
GLX_RENDERER_VENDOR_ID_MESA=_C('GLX_RENDERER_VENDOR_ID_MESA',0x8183)
GLX_RENDERER_VERSION_MESA=_C('GLX_RENDERER_VERSION_MESA',0x8185)
GLX_RENDERER_VIDEO_MEMORY_MESA=_C('GLX_RENDERER_VIDEO_MEMORY_MESA',0x8187)
@_f
@_p.types(_cs.Bool,_cs.c_int,ctypes.POINTER(_cs.c_uint))
def glXQueryCurrentRendererIntegerMESA(attribute,value):pass
@_f
@_p.types(ctypes.c_char_p,_cs.c_int)
def glXQueryCurrentRendererStringMESA(attribute):pass
@_f
@_p.types(_cs.Bool,ctypes.POINTER(_cs.Display),_cs.c_int,_cs.c_int,_cs.c_int,ctypes.POINTER(_cs.c_uint))
def glXQueryRendererIntegerMESA(dpy,screen,renderer,attribute,value):pass
@_f
@_p.types(ctypes.c_char_p,ctypes.POINTER(_cs.Display),_cs.c_int,_cs.c_int,_cs.c_int)
def glXQueryRendererStringMESA(dpy,screen,renderer,attribute):pass
|
[
"justin.sostmann@googlemail.com"
] |
justin.sostmann@googlemail.com
|
a585065a3adc8bc699bf8ba1c78b67358d1ea23c
|
c99c272181eb43df688cc6af10bfb17659014ab9
|
/03_ОOP-Python/01-Defining Classes/02_Exercise/07_GuildSystem/project/venv/Scripts/easy_install-script.py
|
479119ad1bbdbfaf2e56c4f7f55eb619444da6c2
|
[] |
no_license
|
LachezarKostov/SoftUni
|
ce89d11a4796c10c8975dc5c090edecac993cb03
|
47559e9f01f7aabd73d84aa175be37140e2d5621
|
refs/heads/master
| 2023-01-29T20:49:57.196136
| 2020-12-10T12:34:09
| 2020-12-10T12:34:09
| 283,491,508
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
#!"C:\Users\dream\Desktop\Python\OP-Python\01-Defining Classes\02_Exercise\07_GuildSystem\project\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"68952160+LachezarKostov@users.noreply.github.com"
] |
68952160+LachezarKostov@users.noreply.github.com
|
d518f64f0bbd5273253b9da996adb85350151238
|
730a0291d90bf220d162791287e422bc4225d164
|
/samples/StackResult/fsmpy/StackSynchronized.py
|
ce716b3963340b6d02b4f30ab46f82112d7579f6
|
[
"BSD-3-Clause"
] |
permissive
|
jon-jacky/PyModel
|
27442d062e615bd0bf1bd16d86ae56cc4d3dc443
|
457ea284ea20703885f8e57fa5c1891051be9b03
|
refs/heads/master
| 2022-11-02T14:08:47.012661
| 2022-10-16T09:47:53
| 2022-10-16T09:47:53
| 2,034,133
| 75
| 36
|
NOASSERTION
| 2021-07-11T21:15:08
| 2011-07-12T04:23:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
# pma.py Stack StackOneScenario -m 6 -o StackSynchronized
# 4 states, 6 transitions, 4 accepting states, 0 unsafe states, 0 finished and 0 deadend states
# actions here are just labels, but must be symbols with __name__ attribute
def Push(): pass
def Pop(): pass
# states, key of each state here is its number in graph etc. below
states = {
0 : {'StackOneScenario': 0, 'Stack': {'stack': []}},
1 : {'StackOneScenario': 0, 'Stack': {'stack': [1]}},
2 : {'StackOneScenario': 0, 'Stack': {'stack': [1, 1]}},
3 : {'StackOneScenario': 0, 'Stack': {'stack': [1, 1, 1]}},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0, 1, 2, 3]
unsafe = []
frontier = []
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
(0, (Push, (1,), None), 1),
(1, (Pop, (), 1), 0),
(1, (Push, (1,), None), 2),
(2, (Pop, (), 1), 1),
(2, (Push, (1,), None), 3),
(3, (Pop, (), 1), 2),
)
|
[
"jon@u.washington.edu"
] |
jon@u.washington.edu
|
a914ff8c2d0018797ec75f0eb379efac9c21abef
|
c0a5ff5f77943a9529512e6b27148f3318ab5264
|
/vowels2.py
|
9fe9e3f321664f0f5ebedae52821be5fdb7ac104
|
[] |
no_license
|
smatthewenglish/head_first_python
|
b15cc7260fa6607759778ac37d86006f803462a9
|
6e783ce541d5462fb2f84cc901c713fcf5895240
|
refs/heads/master
| 2023-03-28T14:50:16.857613
| 2021-03-31T16:41:14
| 2021-03-31T16:41:14
| 350,149,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
# Report each distinct vowel of a user-supplied word, in order of first
# appearance. Fixes: removed the commented-out leftovers and merged the
# nested ifs into one condition (behaviour unchanged).
vowels = ['a', 'e', 'i', 'o', 'u']
word = input("Provide a word to search for vowels: ")
found = []
for letter in word:
    # Record each vowel only once, keeping first-seen order.
    if letter in vowels and letter not in found:
        found.append(letter)
for vowel in found:
    print(vowel)
|
[
"s.matthew.english@gmail.com"
] |
s.matthew.english@gmail.com
|
bf7cccfc45cdf2461987920d5a0b5fcb107fe227
|
5488617b1b05c436b1f8c8642ea75ca754719f8d
|
/TW_study/LimitCode/tW_measurment/mlfitNormsToText.py
|
7cbfc9933869090ddc8caf40ffac104930662672
|
[] |
no_license
|
wenxingfang/TW_Top
|
fdb1ba136be6ace8fdacaade58cb4ca4fcdc3c9e
|
389e76c904d08a59d9141b9b66ec15d2583f8e9a
|
refs/heads/master
| 2021-02-05T06:54:27.908688
| 2020-02-28T13:24:00
| 2020-02-28T13:24:00
| 243,754,087
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
import re
from sys import argv, stdout, stderr, exit
# import ROOT with a fix to get batch mode (http://root.cern.ch/phpBB3/viewtopic.php?t=3198)
argv.append( '-b-' )
import ROOT
ROOT.gROOT.SetBatch(True)
argv.remove( '-b-' )
if len(argv) == 0: raise RuntimeError, "Usage: mlfitNormsToText.py [ -u ] mlfit.root";
errors = False
if len(argv) > 2 and argv[1] == "-u":
errors = True
argv[1] = argv[2];
file = ROOT.TFile.Open(argv[1]);
prefit = file.Get("norm_prefit")
fit_s = file.Get("norm_fit_s")
fit_b = file.Get("norm_fit_b")
if prefit == None: stderr.write("Missing fit_s in %s. Did you run MaxLikelihoodFit in a recent-enough version of combine and with --saveNorm?\n" % file);
if fit_s == None: raise RuntimeError, "Missing fit_s in %s. Did you run MaxLikelihoodFit with --saveNorm?" % file;
if fit_b == None: raise RuntimeError, "Missing fit_b in %s. Did you run MaxLikelihoodFit with --saveNorm?" % file;
iter = fit_s.createIterator()
while True:
norm_s = iter.Next()
if norm_s == None: break;
norm_b = fit_b.find(norm_s.GetName())
norm_p = prefit.find(norm_s.GetName()) if prefit else None
m = re.match(r"(\w+)/(\w+)", norm_s.GetName());
if m == None: m = re.match(r"n_exp_(?:final_)?(?:bin)+(\w+)_proc_(\w+)", norm_s.GetName());
if m == None: raise RuntimeError, "Non-conforming object name %s" % norm_s.GetName()
if norm_b == None: raise RuntimeError, "Missing normalization %s for background fit" % norm_s.GetName()
if prefit and norm_p and errors:
print "%-30s %-30s %7.3f +/- %7.3f %7.3f +/- %7.3f %7.3f +/- %7.3f" % (m.group(1), m.group(2), norm_p.getVal(), norm_p.getError(), norm_s.getVal(), norm_s.getError(), norm_b.getVal(), norm_b.getError())
else:
if errors:
print "%-30s %-30s %7.3f +/- %7.3f %7.3f +/- %7.3f" % (m.group(1), m.group(2), norm_s.getVal(), norm_s.getError(), norm_b.getVal(), norm_b.getError())
else:
print "%-30s %-30s %7.3f %7.3f" % (m.group(1), m.group(2), norm_s.getVal(), norm_b.getVal())
|
[
"wenxing.fang@cern.ch"
] |
wenxing.fang@cern.ch
|
234f9d0be069bd885e1b1e25db82bd2eb4e0e97e
|
d765d19f80a6bfed71685838306f2d91f6a5a7dd
|
/rdt/rdt21.py
|
0c2bba984d0b3daf478d990edda454a24d739487
|
[] |
no_license
|
EliasFarhan/CompNet
|
1f1f83e6babdb688e1d626117cdb50a642a9d2a9
|
c95b36c12a7a0a0d0ac5ecdb41e1b227c3973de0
|
refs/heads/master
| 2021-07-16T20:33:56.803384
| 2020-09-15T18:54:18
| 2020-09-15T18:54:18
| 210,541,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
from rdt.base import *
from rdt.rdt20 import ChannelRdt20
class SenderRdt21(Sender):
    """rdt2.1 sender: sequence-numbered packets with a 1-byte checksum."""
    last_packet = ""              # payload of the packet in flight (for resends)
    sequence_nmb = 1              # current sequence number (only low byte is sent)
    msg_lock = threading.Lock()   # held from send until the packet is ACKed

    def send_data(self, data, resend=False):
        # A fresh send takes the lock (released on ACK), so at most one
        # un-acknowledged packet is outstanding; resends keep holding it.
        if not resend:
            self.msg_lock.acquire()
        self.last_packet = data
        text_data = data.encode()
        # Packet layout: [checksum byte][sequence byte][payload bytes...].
        packet = bytearray(len(text_data) + 2)
        packet[1] = self.sequence_nmb.to_bytes(8, byteorder='little')[0]
        check_sum = 0
        for byte in text_data:
            check_sum += byte
        check_sum += packet[1]
        # Only the low byte of the sum is kept, i.e. checksum modulo 256.
        packet[0] = check_sum.to_bytes(8, byteorder="little")[0]
        packet[2:len(text_data) + 2] = text_data
        self.channel.send_msg(packet)

    def receive_response(self, response):
        # Validate the response: byte 3 must equal the low byte of the sum
        # of the first two bytes.
        # NOTE(review): only response[0:2] — 2 of the 3 "ACK"/"NAK" bytes —
        # is covered by this checksum; the receiver computes it the same
        # way, so both ends agree, but the third byte goes unprotected.
        check_sum = 0
        for byte in response[0:2]:
            check_sum += byte
        if check_sum.to_bytes(8, byteorder='little')[0] != response[3]:
            print("[Error] Bad response checksum : need to send the last packet again: "+self.last_packet)
            self.send_data(self.last_packet, resend=True)
            return
        if b"ACK" in response:
            # Delivery confirmed: advance the sequence and allow a new send.
            print("[ACK] Packet went well")
            self.sequence_nmb += 1
            self.msg_lock.release()
        elif b"NAK" in response:
            print("[NAK] Need to send packet again")
            self.send_data(self.last_packet, resend=True)
        else:
            print("[Error] Bad response : need to send the last packet again")
            self.send_data(self.last_packet, resend=True)
class ReceiverRdt21(Receiver):
    """rdt2.1 receiver: verifies checksums, drops duplicates, replies ACK/NAK."""
    sequence_number = 0   # sequence byte of the last packet delivered upward

    def receive_data(self, data):
        # Packet layout mirrors the sender: [checksum][sequence][payload...].
        check_sum = data[0]
        sequence_nmb = data[1]
        text_data = data[2:]
        byte_sum = 0
        response = bytearray(4)
        for byte in text_data:
            byte_sum += byte
        byte_sum += sequence_nmb
        if byte_sum.to_bytes(8, byteorder="little")[0] == check_sum:
            # Checksum OK: deliver upward only if this is not a duplicate
            # of the previously delivered sequence number.
            if self.sequence_number != sequence_nmb:
                super().receive_data(text_data)
                self.sequence_number = sequence_nmb
            # NOTE(review): assigning the 3 bytes b"ACK" to the 2-element
            # slice response[0:2] grows the bytearray to 5 bytes, so the
            # checksum written at index 3 lands right after "ACK"; the
            # sender checksums the same 2-byte slice, keeping both sides
            # consistent — but worth confirming this is intentional.
            response[0:2] = b"ACK"
            byte_sum = 0
            for byte in response[0:2]:
                byte_sum += byte
            response[3] = byte_sum.to_bytes(8, byteorder='little')[0]
            self.send_response(response)
        else:
            # Corrupted packet: answer NAK so the sender retransmits.
            response[0:2] = b"NAK"
            byte_sum = 0
            for byte in response[0:2]:
                byte_sum += byte
            response[3] = byte_sum.to_bytes(8, byteorder='little')[0]
            self.send_response(response)

    def send_response(self, response):
        # Delegates to the base class; kept as an explicit override hook.
        super().send_response(response)
def main():
    """Wire a sender/receiver pair over an rdt2.0 channel and run the simulation."""
    simulation = Simulation(sender=SenderRdt21(),
                            channel=ChannelRdt20(),
                            receiver=ReceiverRdt21())
    simulation.simulate()


if __name__ == "__main__":
    main()
|
[
"elias.farhan@gmail.com"
] |
elias.farhan@gmail.com
|
63a3e633e544e4a017474a3cba78a6c0a93f189b
|
17070ea982156a8553c24e2ea3b687fb1dc5544e
|
/shop/views.py
|
02cd002f7c32aecc9a6deff58f0d5b489658af0a
|
[] |
no_license
|
akiyoko/django-concurrency-sample
|
75353fe55e0376e08f2c888b5feb323f9728fc1a
|
8b9fd1e04a034cb0d8e6d1915d864b13b1726608
|
refs/heads/main
| 2023-01-22T10:49:39.375878
| 2020-12-01T05:17:53
| 2020-12-01T05:17:53
| 317,429,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,840
|
py
|
import logging
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.db import transaction
# from django.http.response import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views import View
from .models import Book, BookStock, Order
logger = logging.getLogger(__name__)
User = get_user_model()
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class CheckoutView(View):
    """Book checkout view.

    GET renders the confirmation page; POST performs the purchase inside
    explicitly scoped transactions. ATOMIC_REQUESTS is disabled for this
    view (transaction.non_atomic_requests) so the method manages its own
    transaction boundaries.
    """

    def get(self, request, *args, **kwargs):
        # Look up the book and its stock record; 404 if either is missing.
        book = get_object_or_404(Book, pk=kwargs['pk'])
        book_stock = get_object_or_404(BookStock, book=book)
        if book_stock.quantity == 0:
            messages.error(request, "在庫がないので購入できません。")
        context = {
            'book': book,
            'book_stock': book_stock,
        }
        return TemplateResponse(request, 'shop/checkout.html', context)

    def post(self, request, *args, **kwargs):
        # # TODO: simulate a logged-in state
        # request.user = User(pk=1)

        book = get_object_or_404(Book, pk=kwargs['pk'])

        # Variant 1) default behaviour
        # Variant 2) with ATOMIC_REQUESTS enabled
        # # (1) Register the order row
        # order = Order(
        #     status=Order.STATUS_PAYMENT_PROCESSING,
        #     total_amount=book.price,
        #     ordered_by=request.user,
        # )
        # order.save()
        #
        # # (2) Check the stock level
        # book_stock = get_object_or_404(BookStock, book=book)
        # # (3) Decrement the stock by one and save
        # book_stock.quantity -= 1
        # book_stock.save()
        #
        # # Payment processing
        # try:
        #     print('決済処理')
        #     # TODO
        #     # raise Exception("決済処理で例外発生")
        # except Exception as e:
        #     # Restore the stock (increment by one and save)
        #     book_stock = get_object_or_404(BookStock, book=book)
        #     book_stock.quantity += 1
        #     book_stock.save()
        #
        #     # Update the order status to payment-NG
        #     order.status = Order.STATUS_PAYMENT_NG
        #     order.save()
        #
        #     messages.error(request, "決済NGです。")
        #     return TemplateResponse(request, 'shop/checkout_error.html')
        #
        # # (4) Update the order status to payment-OK
        # order.status = Order.STATUS_PAYMENT_OK
        # order.save()

        # Variant 3) wrap the whole flow in transaction.atomic()
        # Variant 4) with ATOMIC_REQUESTS enabled, open our own transactions
        # inside this particular method (the variant in effect below)
        with transaction.atomic():
            # (1) Register the order row
            order = Order(
                status=Order.STATUS_PAYMENT_PROCESSING,
                total_amount=book.price,
                ordered_by=request.user,
            )
            order.save()

            # (2) Check the stock level
            book_stock = get_object_or_404(BookStock, book=book)
            # (3) Decrement the stock by one and save
            book_stock.quantity -= 1
            book_stock.save()

            # ... (payment processing) ...
            print('決済処理')

        with transaction.atomic():
            # (4) Update the order status to payment-OK
            order.status = Order.STATUS_PAYMENT_OK
            order.save()

        messages.info(request, "購入しました。")
        if book_stock.quantity == 0:
            messages.warning(request, "在庫がなくなりました。")
        context = {
            'book': book,
            'book_stock': book_stock,
            'order': order,
        }
        return TemplateResponse(request, 'shop/checkout.html', context)
|
[
"akiyoko@users.noreply.github.com"
] |
akiyoko@users.noreply.github.com
|
c7e0e8f56c9b540a6d37dce314d31c36ea920326
|
27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f
|
/tests/unit/modules/network/onyx/test_onyx_ospf.py
|
665633222c74febcc7f196f3e51d0f6b0b91d4fb
|
[] |
no_license
|
coll-test/notstdlib.moveitallout
|
eb33a560070bbded5032385d0aea2f3cf60e690b
|
0987f099b783c6cf977db9233e1c3d9efcbcb3c7
|
refs/heads/master
| 2020-12-19T22:28:33.369557
| 2020-01-23T18:51:26
| 2020-01-23T18:51:26
| 235,865,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,664
|
py
|
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.notstdlib.moveitallout.tests.unit.compat.mock import patch
from ansible_collections.notstdlib.moveitallout.plugins.modules import onyx_ospf
from ansible_collections.notstdlib.moveitallout.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
class TestOnyxOspfModule(TestOnyxModule):
module = onyx_ospf
def setUp(self):
super(TestOnyxOspfModule, self).setUp()
self._ospf_exists = True
self.mock_get_config = patch.object(
onyx_ospf.OnyxOspfModule,
"_get_ospf_config")
self.get_config = self.mock_get_config.start()
self.mock_get_interfaces_config = patch.object(
onyx_ospf.OnyxOspfModule,
"_get_ospf_interfaces_config")
self.get_interfaces_config = self.mock_get_interfaces_config.start()
self.mock_load_config = patch(
'ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestOnyxOspfModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
if self._ospf_exists:
config_file = 'onyx_ospf_show.cfg'
self.get_config.return_value = load_fixture(config_file)
config_file = 'onyx_ospf_interfaces_show.cfg'
self.get_interfaces_config.return_value = load_fixture(config_file)
else:
self.get_config.return_value = None
self.get_interfaces_config.return_value = None
self.load_config.return_value = None
def test_ospf_absent_no_change(self):
set_module_args(dict(ospf=3, state='absent'))
self.execute_module(changed=False)
def test_ospf_present_no_change(self):
interface = dict(name='Loopback 1', area='0.0.0.0')
set_module_args(dict(ospf=2, router_id='10.2.3.4',
interfaces=[interface]))
self.execute_module(changed=False)
def test_ospf_present_remove(self):
set_module_args(dict(ospf=2, state='absent'))
commands = ['no router ospf 2']
self.execute_module(changed=True, commands=commands)
def test_ospf_change_router(self):
interface = dict(name='Loopback 1', area='0.0.0.0')
set_module_args(dict(ospf=2, router_id='10.2.3.5',
interfaces=[interface]))
commands = ['router ospf 2', 'router-id 10.2.3.5', 'exit']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ospf_remove_router(self):
interface = dict(name='Loopback 1', area='0.0.0.0')
set_module_args(dict(ospf=2, interfaces=[interface]))
commands = ['router ospf 2', 'no router-id', 'exit']
self.execute_module(changed=True, commands=commands, sort=False)
def test_ospf_add_interface(self):
interfaces = [dict(name='Loopback 1', area='0.0.0.0'),
dict(name='Loopback 2', area='0.0.0.0')]
set_module_args(dict(ospf=2, router_id='10.2.3.4',
interfaces=interfaces))
commands = ['interface loopback 2 ip ospf area 0.0.0.0']
self.execute_module(changed=True, commands=commands)
def test_ospf_remove_interface(self):
set_module_args(dict(ospf=2, router_id='10.2.3.4'))
commands = ['interface loopback 1 no ip ospf area']
self.execute_module(changed=True, commands=commands)
def test_ospf_add(self):
self._ospf_exists = False
interfaces = [dict(name='Loopback 1', area='0.0.0.0'),
dict(name='Vlan 210', area='0.0.0.0'),
dict(name='Eth1/1', area='0.0.0.0'),
dict(name='Po1', area='0.0.0.0')]
set_module_args(dict(ospf=2, router_id='10.2.3.4',
interfaces=interfaces))
commands = ['router ospf 2', 'router-id 10.2.3.4', 'exit',
'interface loopback 1 ip ospf area 0.0.0.0',
'interface vlan 210 ip ospf area 0.0.0.0',
'interface ethernet 1/1 ip ospf area 0.0.0.0',
'interface port-channel 1 ip ospf area 0.0.0.0']
self.execute_module(changed=True, commands=commands)
|
[
"wk@sydorenko.org.ua"
] |
wk@sydorenko.org.ua
|
317288bb41c5c374236f56788577a76f1c080b9c
|
42fe2827d14a82043ade9393beaedf53e22a69f5
|
/bebop_ws/devel/.private/bebop_msgs/lib/python2.7/dist-packages/bebop_msgs/msg/_CommonCommonStateCurrentDateChanged.py
|
55096047d13f8e60d5b3ab4a3aa26cae99d7e236
|
[] |
no_license
|
cjbanks/bebop-software-framework
|
a3714646545e9d7d71299a365814bc87437f5e14
|
7da1bbdef4e84aa0ed793cfaad9fe133959ebe21
|
refs/heads/master
| 2023-04-30T17:52:23.255302
| 2020-11-18T18:32:41
| 2020-11-18T18:32:41
| 368,626,051
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,233
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from bebop_msgs/CommonCommonStateCurrentDateChanged.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class CommonCommonStateCurrentDateChanged(genpy.Message):
_md5sum = "7b1c2ad09d95986b33cc46dd275d6aad"
_type = "bebop_msgs/CommonCommonStateCurrentDateChanged"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# CommonCommonStateCurrentDateChanged
# auto-generated from up stream XML files at
# github.com/Parrot-Developers/libARCommands/tree/master/Xml
# To check upstream commit hash, refer to last_build_info file
# Do not modify this file by hand. Check scripts/meta folder for generator files.
#
# SDK Comment: Date changed.\n Corresponds to the latest date set on the drone.\n\n **Please note that you should not care about this event if you are using the libARController API as this library is handling the connection process for you.**
Header header
# Date with ISO-8601 format
string date
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
"""
__slots__ = ['header','date']
_slot_types = ['std_msgs/Header','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,date
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(CommonCommonStateCurrentDateChanged, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.date is None:
self.date = ''
else:
self.header = std_msgs.msg.Header()
self.date = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.date
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.date = str[start:end].decode('utf-8')
else:
self.date = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.date
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.date = str[start:end].decode('utf-8')
else:
self.date = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
|
[
"Chewie_Alex@nder1"
] |
Chewie_Alex@nder1
|
73c728462aaa1aeb1ff14b80acd3d67f327d7557
|
106983cf0b8df622f514ecff2bb2fa4c794c9dac
|
/Misc/OpenCV/camshiftTest.py
|
5677142b105f693d0656e9845a8b7bfcaa575dc3
|
[] |
no_license
|
michael5486/Senior-Design
|
2d9ae521c637abf7c0825f85b32752ad61c62744
|
6b6c78bed5f20582a9753a9c10020c709d6b6e53
|
refs/heads/master
| 2021-01-19T09:58:35.378164
| 2017-05-26T17:17:13
| 2017-05-26T17:17:13
| 67,556,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,367
|
py
|
#!/usr/bin/env python
import cv2.cv as cv
import serial
#ser = serial.Serial("/dev/ttyACM0",9600)
def is_rect_nonzero(r):
(_,_,w,h) = r
return (w > 0) and (h > 0)
class CamShiftDemo:
def __init__(self):
self.capture = cv.CaptureFromCAM(0)
cv.NamedWindow( "CamShiftDemo", 1 )
cv.NamedWindow( "Histogram", 1 )
cv.SetMouseCallback( "CamShiftDemo", self.on_mouse)
self.drag_start = None # Set to (x,y) when mouse starts drag
self.track_window = None # Set to rect when the mouse drag finishes
print( "Keys:\n"
" ESC - quit the program\n"
" b - switch to/from backprojection view\n"
"To initialize tracking, drag across the object with the mouse\n" )
def hue_histogram_as_image(self, hist):
""" Returns a nice representation of a hue histogram """
histimg_hsv = cv.CreateImage( (320,200), 8, 3)
mybins = cv.CloneMatND(hist.bins)
cv.Log(mybins, mybins)
(_, hi, _, _) = cv.MinMaxLoc(mybins)
cv.ConvertScale(mybins, mybins, 255. / hi)
w,h = cv.GetSize(histimg_hsv)
hdims = cv.GetDims(mybins)[0]
for x in range(w):
xh = (180 * x) / (w - 1) # hue sweeps from 0-180 across the image
val = int(mybins[int(hdims * x / w)] * h / 255)
cv.Rectangle( histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1)
cv.Rectangle( histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1)
histimg = cv.CreateImage( (320,200), 8, 3)
cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR)
return histimg
def on_mouse(self, event, x, y, flags, param):
if event == cv.CV_EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
if event == cv.CV_EVENT_LBUTTONUP:
self.drag_start = None
self.track_window = self.selection
if self.drag_start:
xmin = min(x, self.drag_start[0])
ymin = min(y, self.drag_start[1])
xmax = max(x, self.drag_start[0])
ymax = max(y, self.drag_start[1])
self.selection = (xmin, ymin, xmax - xmin, ymax - ymin)
def run(self):
hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
backproject_mode = False
print "hitting run section"
x = 0
while True:
#print x
#x = x + 1
frame = cv.QueryFrame( self.capture )
cv.Flip(frame, frame, 1)
# Convert to HSV and keep the hue
hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
cv.Split(hsv, self.hue, None, None, None)
# Compute back projection
backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
# Run the cam-shift
cv.CalcArrBackProject( [self.hue], backproject, hist )
if self.track_window and is_rect_nonzero(self.track_window):
crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
print self.track_window
(iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
self.track_window = rect
print self.track_window
try:
#prints the center x and y value of the tracked ellipse
coord = track_box[0]
print "center = {}".format(coord)
if (coord[0] < 320):
print "move right"
# ser.write("R")
elif (coord[0] == 320):
print "do nothing"
else:
print "move left"
# ser.write("L")
except UnboundLocalError:
print "track_box is None"
# If mouse is pressed, highlight the current selected rectangle
# and recompute the histogram
if self.drag_start and is_rect_nonzero(self.selection):
sub = cv.GetSubRect(frame, self.selection)
save = cv.CloneMat(sub)
cv.ConvertScale(frame, frame, 0.5)
cv.Copy(save, sub)
x,y,w,h = self.selection
cv.Rectangle(frame, (x,y), (x+w,y+h), (255,255,255))
sel = cv.GetSubRect(self.hue, self.selection )
cv.CalcArrHist( [sel], hist, 0)
(_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
if max_val != 0:
cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
elif self.track_window and is_rect_nonzero(self.track_window):
print track_box
cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
if not backproject_mode:
cv.ShowImage( "CamShiftDemo", frame )
else:
cv.ShowImage( "CamShiftDemo", backproject)
cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))
c = cv.WaitKey(7) % 0x100
if c == 27:
break
elif c == ord("b"):
backproject_mode = not backproject_mode
if __name__=="__main__":
demo = CamShiftDemo()
demo.run()
cv.DestroyAllWindows()
|
[
"michael5486@gmail.com"
] |
michael5486@gmail.com
|
47befcf66e46b26472ad8cb956c2fc14284c7c9e
|
3794bc772676d34a6794d19eedb41c2d8a7d39c0
|
/ge_dqn/monitor.py
|
53024700f3b5ca11545565d3ad057f2807cd0141
|
[] |
no_license
|
geyang/reinforcement_learning_learning_notes
|
3a79af021b6b126e37b09bf1871cfe9852690abe
|
f862dbf496f7f5d6cb091604dfb808511de5aa9c
|
refs/heads/master
| 2021-08-23T11:32:14.127137
| 2017-12-04T18:28:35
| 2017-12-04T18:28:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
from copy import deepcopy
import numpy
def contextify(env):
type(env).__enter__ = lambda s: s
type(env).__exit__ = lambda s, *args: s.close()
return env
def monitor(env):
episode_rewards = []
_step = env.step
def step(action):
s, rew, done, info = _step(action)
episode_rewards.append(rew)
if not done:
return s, rew, done, info
episode_info = dict(
total_reward=sum(episode_rewards),
average_reward=numpy.mean(episode_rewards),
timesteps=len(episode_rewards)
)
episode_rewards.clear()
if type(info) is list:
info = deepcopy(info) + [episode_info]
elif type(info) is tuple:
info = tuple(*deepcopy(info), *episode_info)
elif hasattr(info, 'update'):
info = deepcopy(info)
info.update(**episode_info)
return s, rew, done, info
env.step = step
return env
|
[
"yangge1987@gmail.com"
] |
yangge1987@gmail.com
|
815fb3177d93f4c5b3da4d57786399655d7a5e2b
|
493a36f1f8606c7ddce8fc7fe49ce4409faf80be
|
/.history/B073040023/client_20210614185342.py
|
411412020365d07802e69305599262f66838a62f
|
[] |
no_license
|
ZhangRRz/computer_network
|
f7c3b82e62920bc0881dff923895da8ae60fa653
|
077848a2191fdfe2516798829644c32eaeded11e
|
refs/heads/main
| 2023-05-28T02:18:09.902165
| 2021-06-15T06:28:59
| 2021-06-15T06:28:59
| 376,568,344
| 0
| 0
| null | 2021-06-13T14:48:36
| 2021-06-13T14:48:36
| null |
UTF-8
|
Python
| false
| false
| 5,078
|
py
|
import socket
import threading
import tcppacket
import struct
from time import sleep
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(msg):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg.encode('utf-8')
print(data)
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port) # Sending message to UDP server
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
def init_new_videoreq_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
msg = "video 1".encode('utf-8')
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port)) # Sending message to UDP server
recvdata = b''
ack_seq = 0
seq = 0
counter = 0
while True:
data, address = sock.recvfrom(512*1024)
s = struct.calcsize('!HHLLBBHHH')
raw = struct.unpack('!HHLLBBHHH', data[:s])
print("receive packet from ", address,
"with header", raw)
if(raw[2] == ack_seq and raw[7] == 0):
recvdata += data[s:]
if(raw[5] % 2):
# fin_falg
fin_flag = 1
else:
fin_flag = 0
ack_seq += 1
counter += 1
else:
print("Receive ERROR packet from ", address)
fin_flag = 1
counter = 3
# --------------------------------------------
# send ACK
if(counter == 3):
tcp = tcppacket.TCPPacket(
data=str("ACK").encode('utf-8'),
seq=seq, ack_seq=ack_seq,
flags_ack=1,
flags_fin=fin_flag)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address,
"with ack seq: ", ack_seq, " and seq: ", seq)
sock.sendto(tcp.raw, address)
if(not fin_flag):
counter = 0
seq += 1
# --------------------------------------------
print(fin_flag)
if(fin_flag):
break
savename = str(i+1)+"received.mp4"
f = open(savename, "wb")
f.write(recvdata)
f.close()
def init_new_dns_req(i):
# ---------------------
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "dns google.com"
msg = msg.encode('utf-8')
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
# ----------------------
# def init_new
threads = []
#Calculation
print("Demo calculation function")
init_new_calc_req("calc 2 + 6")
sleep(0.25)
init_new_calc_req("calc 2 - 6")
sleep(0.25)
init_new_calc_req("calc 2 * 6")
sleep(0.25)
init_new_calc_req("calc 2 / 6")
sleep(0.25)
init_new_calc_req("calc 2 ^ 6")
sleep(0.25)
init_new_calc_req("calc 16 sqrt")
sleep(0.25)
# threads.append(threading.Thread(target = init_new_calc_req, args = (i,)))
# threads[-1].start()
# for i in range(1):
# threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
# threads[-1].start()
# for i in range(1):
# threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
# threads[-1].start()
|
[
"tom95011@gmail.com"
] |
tom95011@gmail.com
|
ac61d410d9419c6949dc2e7bb0e4fd3b37e85afe
|
2b7efe276d1dfdc70a4b5cd59ae863b7b7a1bd58
|
/euler35.py
|
24b79c529bb65377213bed68a3834c21df6f4544
|
[] |
no_license
|
mckkcm001/euler
|
550bbd126e8d9bb5bc7cb854147399060f865cfc
|
8cf1db345b05867d47921b01e8c7e4c2df4ee98d
|
refs/heads/master
| 2021-01-01T17:43:28.799946
| 2017-11-07T02:17:34
| 2017-11-07T02:17:34
| 18,375,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
import math
n = [2]
def is_prime(n):
if n % 2 == 0 and n > 2:
return False
for i in range(3, int(math.sqrt(n)) + 1, 2):
if n % i == 0:
return False
return True
def is_circ(n):
a = n
for i in range(len(str(n))):
a = 10**(len(str(a))-1)*(a%10)+ a//10
if not is_prime(a):
return False
return True
for i in range(3,1000000,2):
if i%10 == 0:
continue
if is_circ(i):
n.append(i)
print(len(n))
|
[
"noreply@github.com"
] |
mckkcm001.noreply@github.com
|
320aa009bc8015194f321089be13615ebf99be42
|
8b83d79425985e9c87ff4b641c2dcb6a151f3aa1
|
/recipes/templatetags/markdown.py
|
75cbd8e9f44234b8d253b147c3548fd001844065
|
[] |
no_license
|
akx/pyttipannu
|
e1366d982bae62a70da24b7da1a93c40efb51217
|
7b02f7d18d594beddb64beb99283c738ca06b8f0
|
refs/heads/master
| 2021-01-13T09:15:59.086825
| 2016-09-29T13:52:17
| 2016-09-29T13:52:17
| 69,023,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
from django.template import Library
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from markdown import markdown as render_markdown
register = Library()
@register.filter
def markdown(s):
return mark_safe(render_markdown(force_text(s)))
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
dfa802d2eab75f6143932b9db16d2742cd829829
|
84ee74894d1e6d76281dd1d3b76ee1dcde0d36b5
|
/plotting/visualisePhi.py
|
1c7e4c4f7e5da898e37f21b143394c229a9fa1a5
|
[] |
no_license
|
pyccel/pygyro
|
e3f13e5679b37a2dfebbd4b10337e6adefea1105
|
a8562e3f0dd8fd56159785e655f017bbcae92e51
|
refs/heads/master
| 2023-03-10T07:43:17.663359
| 2022-08-17T12:06:25
| 2022-08-17T12:06:25
| 170,837,738
| 4
| 3
| null | 2023-01-02T10:09:08
| 2019-02-15T09:27:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
import argparse
from mpi4py import MPI
import numpy as np
from pygyro.model.grid import Grid
from pygyro.model.layout import LayoutSwapper, getLayoutHandler
from pygyro.poisson.poisson_solver import DensityFinder, QuasiNeutralitySolver
from pygyro.utilities.grid_plotter import SlicePlotterNd
from pygyro.initialisation.setups import setupCylindricalGrid
from pygyro.diagnostics.norms import l2
parser = argparse.ArgumentParser(
description='Plot the intial electric potential')
parser.add_argument('const_filename', type=str,
help='The constants file describing the setup')
args = parser.parse_args()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
distribFunc, constants, t = setupCylindricalGrid(constantFile=args.const_filename,
layout='v_parallel',
comm=comm,
allocateSaveMemory=True)
nprocs = distribFunc.getLayout(distribFunc.currentLayout).nprocs[:2]
layout_poisson = {'v_parallel_2d': [0, 2, 1],
'mode_solve': [1, 2, 0]}
layout_vpar = {'v_parallel_1d': [0, 2, 1]}
layout_poloidal = {'poloidal': [2, 1, 0]}
remapperPhi = LayoutSwapper(comm, [layout_poisson, layout_vpar, layout_poloidal],
[nprocs, nprocs[0], nprocs[1]
], distribFunc.eta_grid[:3],
'mode_solve')
remapperRho = getLayoutHandler(
comm, layout_poisson, nprocs, distribFunc.eta_grid[:3])
phi = Grid(distribFunc.eta_grid[:3], distribFunc.getSpline(slice(0, 3)),
remapperPhi, 'mode_solve', comm, dtype=np.complex128)
rho = Grid(distribFunc.eta_grid[:3], distribFunc.getSpline(slice(0, 3)),
remapperRho, 'v_parallel_2d', comm, dtype=np.complex128)
density = DensityFinder(6, distribFunc.getSpline(3),
distribFunc.eta_grid, constants)
QNSolver = QuasiNeutralitySolver(distribFunc.eta_grid[:3], 7, distribFunc.getSpline(0),
constants, chi=0)
distribFunc.setLayout('v_parallel')
density.getPerturbedRho(distribFunc, rho)
QNSolver.getModes(rho)
rho.setLayout('mode_solve')
phi.setLayout('mode_solve')
QNSolver.solveEquation(phi, rho)
phi.setLayout('v_parallel_2d')
rho.setLayout('v_parallel_2d')
QNSolver.findPotential(phi)
norm = l2(distribFunc.eta_grid, remapperPhi.getLayout('v_parallel_2d'))
val = norm.l2NormSquared(phi)
print(val)
plotter = SlicePlotterNd(phi, 0, 1, True, sliderDimensions=[
2], sliderNames=['z'])
if (rank == 0):
plotter.show()
else:
plotter.calculation_complete()
|
[
"noreply@github.com"
] |
pyccel.noreply@github.com
|
0f59ddf53e19bb9c1f3b0b8ef1a3e04546cc89e4
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/brackets_20200810105706.py
|
e35d818bc5c2a83d99fa7e410edda4e403b93436
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
def brackets(S):
# "{[()()]}"
stack = []
for i in S:
stack.append(i)
for i in S:
if i == "(" and stack.pop()
print(brackets("{[()()]}"))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
5328be94b6b7b5d34270b3276badb49bfb04b4f1
|
d886f41ac037343b6b9652977f753808117e6246
|
/Behaviors/FK_Relative_Reverse_01.py
|
0a1594dbcc59fe2ead208d9d03c6eabe281422a2
|
[] |
no_license
|
TPayneExperience/TrevorPaynes_RigAndAnimSuite
|
5e918be2de896fdacf2da039815e85b91cf0d7ed
|
18e0482ca6d70277b6455d9a14e6b10406f1553f
|
refs/heads/master
| 2023-09-03T04:14:48.862905
| 2021-11-10T02:50:54
| 2021-11-10T02:50:54
| 275,663,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,254
|
py
|
import pymel.core as pm
import Abstracts.Abstract_Behavior as absBhv
import Utilities.Rig_Utilities as rigUtil
import Utilities.Logger as log
class FK_Relative_01(absBhv.Abstract_Behavior):
bhvType = 'FK Relative Reverse'
validLimbTypes = (4,) # rigData.LIMB_TYPES
groupType = 'FKR' # LookAt, IKPV...
groupShape = 'Cube_Poly'
groupCount = 1
groupMoveable = False # for moving control pivots
uiOrderIndex = 250
usesJointControls = False
usesLimbControls = True
bakeLosesData = True
duplicateJointGroups = False
def InitLimb(self, limb):
log.funcFileDebug()
limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]
jointGroups = pm.listConnections(limb.jointGroups)
jointGroup = rigUtil.SortGroups(jointGroups)[-1]
joint = pm.listConnections(jointGroup.joint)[0]
pm.parent(limbGroup, joint)
rigUtil.ResetAttrs(limbGroup)
pm.parent(limbGroup, limb)
def CleanupLimb(self, limb):
log.funcFileDebug()
#============= FOR BEHAVIOR OPERATION ============================
def Setup_ForBhvOp(self, limb):
pass
def Teardown_ForBhvOp(self, limb):
pass
#============= SETUP ============================
def Setup_Rig_Controls(self, limb):
log.funcFileDebug()
limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]
limbControl = pm.listConnections(limbGroup.control)[0]
jointGroups = pm.listConnections(limb.jointGroups)
jointGroups = rigUtil.SortGroups(jointGroups)[::-1]
controls = []
# Parent control hierarchy
for i in range(len(jointGroups)-1):
childGroup = jointGroups[i+1]
parentCtr = pm.listConnections(jointGroups[i].control)[0]
pm.parent(childGroup, parentCtr)
controls.append(parentCtr)
# Parent Root Joint group to Control
childGroup = jointGroups[0]
pm.parentConstraint(limbControl, childGroup, mo=1)
# Bind rotations
multNode = pm.createNode('multiplyDivide')
pm.connectAttr(limbControl.rotate, multNode.input1)
scalar = 1.0/max(len(controls)-2, 1)
multNode.input2.set(scalar, scalar, scalar)
for childControl in controls[1:]:
pm.connectAttr(multNode.output, childControl.rotate)
# External
parentControl = rigUtil.GetParentControl(limb)
if parentControl:
pm.parentConstraint(parentControl, limbGroup, mo=1)
def Setup_Constraint_JointsToControls(self, limb):
log.funcFileDebug()
for group in pm.listConnections(limb.jointGroups):
joint = pm.listConnections(group.joint)[0]
control = pm.listConnections(group.control)[0]
pm.parentConstraint(control, joint, mo=1)
def Setup_Constraint_ControlsToXforms(self, limb,
xforms, hasPosCst, hasRotCst, hasScaleCst):
log.funcFileDebug()
limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]
limbControl = pm.listConnections(limbGroup.control)[0]
xform = xforms[-1]
if hasPosCst:
pm.pointConstraint(xform, limbControl, mo=1)
if hasRotCst:
pm.orientConstraint(xform, limbControl, mo=1)
if hasScaleCst:
pm.scaleConstraint(xform, limbControl)
return [limbControl]
#============= TEARDOWN ============================
def Teardown_Rig_Controls(self, limb):
log.funcFileDebug()
limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]
limbControl = pm.listConnections(limbGroup.control)[0]
conversionNode = pm.listConnections(limbControl.r)[0]
multNodes = pm.listConnections(conversionNode.output)
pm.delete(multNodes) # delete mult node
groups = pm.listConnections(limb.jointGroups)
groups = rigUtil.SortGroups(groups)[:-1]
pm.parent(groups, limb)
if pm.listConnections(limb.limbParent):
group = rigUtil.GetLimbGroups(limb, self.groupType)[0]
cst = pm.listRelatives(group, c=1, type='parentConstraint')
pm.delete(cst)
def Teardown_Constraint_JointsToControls(self, limb):
log.funcFileDebug()
jointGroups = pm.listConnections(limb.jointGroups)
joints = [pm.listConnections(g.joint)[0] for g in jointGroups]
for joint in joints:
cst = pm.listRelatives(joint, c=1, type='parentConstraint')
pm.delete(cst)
def Teardown_Constraint_ControlsToXforms(self, limb):
log.funcFileDebug()
group = rigUtil.GetLimbGroups(limb, self.groupType)[0]
control = pm.listConnections(group.control)[0]
pm.delete(pm.listRelatives(control, c=1, type='constraint'))
#============= EDITABLE UI ============================
def Setup_Behavior_Limb_UI(self, limb):
log.funcFileDebug()
return False
#============= ANIMATION UI ============================
def Setup_AnimationTools_Limb_UI(self, limb):
return False # return if UI is enabled
# Copyright (c) 2021 Trevor Payne
# See user license in "PayneFreeRigSuite\Data\LicenseAgreement.txt"
|
[
"crashandexplode@hotmail.com"
] |
crashandexplode@hotmail.com
|
10bf94250ae78f7e23d7e6bd2890662625883c6b
|
555002c30895a1e2267d05d67d5167275ade3845
|
/server/server.py
|
d2f825a62b33cfc1b7403d77eceaecff86615fcd
|
[] |
no_license
|
odbite/jkpghack2016
|
159b2938fd8ab7a2a815c664a38c791f2fb440ec
|
8b4f5b3ec555f3436f764c2b49927c200ff335a4
|
refs/heads/master
| 2021-01-10T05:52:52.600618
| 2016-02-27T17:41:07
| 2016-02-27T17:41:07
| 52,673,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
from animals import AnimalApi
from flask import Flask, render_template
from flask_restful import Api
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
static_folder = os.path.join(BASE_DIR, 'client', 'app', 'dist')
print(static_folder)
app = Flask(__name__, template_folder='../client/app', static_path='/static', static_folder=static_folder)
api = Api(app)
api.add_resource(AnimalApi, '/api/animals')
@app.route("/")
def hello():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
|
[
"draso.odin@gmail.com"
] |
draso.odin@gmail.com
|
9f4802a0adb12e9e53c888ddc1d995e8c04f2963
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/web/get_static_site_user_provided_function_app_for_static_site.py
|
f4136b23143201325f3c527173a8c7c478e1d846
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 5,754
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetStaticSiteUserProvidedFunctionAppForStaticSiteResult',
'AwaitableGetStaticSiteUserProvidedFunctionAppForStaticSiteResult',
'get_static_site_user_provided_function_app_for_static_site',
]
@pulumi.output_type
class GetStaticSiteUserProvidedFunctionAppForStaticSiteResult:
"""
Static Site User Provided Function App ARM resource.
"""
def __init__(__self__, created_on=None, function_app_region=None, function_app_resource_id=None, id=None, kind=None, name=None, type=None):
if created_on and not isinstance(created_on, str):
raise TypeError("Expected argument 'created_on' to be a str")
pulumi.set(__self__, "created_on", created_on)
if function_app_region and not isinstance(function_app_region, str):
raise TypeError("Expected argument 'function_app_region' to be a str")
pulumi.set(__self__, "function_app_region", function_app_region)
if function_app_resource_id and not isinstance(function_app_resource_id, str):
raise TypeError("Expected argument 'function_app_resource_id' to be a str")
pulumi.set(__self__, "function_app_resource_id", function_app_resource_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> str:
"""
The date and time on which the function app was registered with the static site.
"""
return pulumi.get(self, "created_on")
@property
@pulumi.getter(name="functionAppRegion")
def function_app_region(self) -> Optional[str]:
"""
The region of the function app registered with the static site
"""
return pulumi.get(self, "function_app_region")
@property
@pulumi.getter(name="functionAppResourceId")
def function_app_resource_id(self) -> Optional[str]:
"""
The resource id of the function app registered with the static site
"""
return pulumi.get(self, "function_app_resource_id")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetStaticSiteUserProvidedFunctionAppForStaticSiteResult(GetStaticSiteUserProvidedFunctionAppForStaticSiteResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetStaticSiteUserProvidedFunctionAppForStaticSiteResult(
created_on=self.created_on,
function_app_region=self.function_app_region,
function_app_resource_id=self.function_app_resource_id,
id=self.id,
kind=self.kind,
name=self.name,
type=self.type)
def get_static_site_user_provided_function_app_for_static_site(function_app_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStaticSiteUserProvidedFunctionAppForStaticSiteResult:
"""
Static Site User Provided Function App ARM resource.
API Version: 2020-12-01.
:param str function_app_name: Name of the function app registered with the static site.
:param str name: Name of the static site.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['functionAppName'] = function_app_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web:getStaticSiteUserProvidedFunctionAppForStaticSite', __args__, opts=opts, typ=GetStaticSiteUserProvidedFunctionAppForStaticSiteResult).value
return AwaitableGetStaticSiteUserProvidedFunctionAppForStaticSiteResult(
created_on=__ret__.created_on,
function_app_region=__ret__.function_app_region,
function_app_resource_id=__ret__.function_app_resource_id,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
type=__ret__.type)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
51f1291b2afb40a6c8d8781e7bc461ba3d058225
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03838/s497427946.py
|
53bb3ecb9f9567bfcac1e11d066677c406a3138b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
x, y = map(int, input().split())
cnt = 0
diff = abs(x) - abs(y)
if diff == 0:
if x * y < 0:
cnt += 1
elif diff > 0:
cnt += diff
if x > 0:
cnt += 1
if y > 0:
cnt += 1
else:
cnt += -diff
if x < 0:
cnt += 1
if y < 0:
cnt += 1
print(cnt)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
83fa096992b60ee9f25862dd01b9c52b2c6c1ea5
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3_neat/16_0_3_JawBone_main.py
|
9103e18ea816c4880314a942a1d0134a68bf0711
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
def cpaf(rn):
for divisor in xrange(2, 100):
if not rn % divisor:
return (False, divisor)
return (True, 1)
def baseconverter(rn, basefrom):
digits = "0123456789"
result = ""
while True:
remains = rn % basefrom
result = digits[remains] + result
rn = rn / basefrom
if rn == 0:
break
return result
lines = raw_input()
for question_index in xrange(1, int(lines) + 1):
length_of_jamcoin, types_of_jamcoin = [int(s) for s in raw_input().split(" ")]
answer_list = []
count = 0
for index in xrange(1, pow(2, length_of_jamcoin)):
inside = baseconverter(index, 2)
if len(str(inside)) < length_of_jamcoin - 1:
result = str(inside).zfill(length_of_jamcoin - 2)
temp_testcase = '1' + result + '1'
answers = temp_testcase
for i in xrange(2, 11):
temp = cpaf(int(temp_testcase, i))
if not temp[0]:
answers += ' ' + str(temp[1])
if answers.count(' ') >= 9:
answer_list.append(answers)
if len(answer_list) >= types_of_jamcoin:
break
print 'Case #1:'
for ans in answer_list:
print ans
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
e1d28343bba645d8be668da7b073af3541987896
|
383d711b269aa42ec051a8300f9bad8cd3384de8
|
/docker/models.py
|
718aa7f04973c627897a573e40c8adb538b13cc7
|
[] |
no_license
|
Lupino/docker-server
|
7af8dab451528704f470a19ae07fbd99afb47435
|
4a199e7e75dcf5ba5161a5373214bb03e8e2cf25
|
refs/heads/master
| 2021-01-10T19:30:42.888559
| 2014-04-01T07:23:22
| 2014-04-01T07:23:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,867
|
py
|
from docker.conf import prefix
from lee import Model, query, Table, conf as lee_conf
from docker.logging import logger
class _Container(Model):
table_name = '{}container'.format(prefix)
columns = [
{'name': 'container_id', 'type': 'str', 'primary': True, 'length': 32},
{'name': 'image_id', 'type': 'str', 'length': 32},
{'name': 'passwd', 'type': 'str', 'length': 32},
{'name': 'ssh_port', 'type': 'int', 'unsigned': True, 'length': 5, 'default': 0},
{'name': 'server_port', 'type': 'int', 'unsigned': True, 'length': 5, 'default': 0},
{'name': 'created_at', 'type': 'int', 'unsigned': True, 'length': 10, 'default': 0},
{'name': 'stop_at', 'type': 'int', 'unsigned': True, 'length': 10, 'default': 0},
]
Container = Table(_Container)
class _UserContainer(Model):
table_name = '{}user_container'.format(prefix)
columns = [
{'name': 'user_id', 'type': 'int', 'length': 10, 'unsigned': True, 'primary': True},
{'name': 'container_id', 'type': 'str', 'length': 32, 'primary': True, 'unique': True}
]
UserContainer = Table(_UserContainer)
class _User(Model):
table_name = '{}user'.format(prefix)
columns = [
{'name': 'user_id', 'type': 'int', 'length': 10, 'unsigned': True, 'primary': True, 'auto_increment': True},
{'name': 'username', 'type': 'str', 'length': 50, 'unique': True},
{'name': 'passwd', 'type': 'str', 'length': 32},
{'name': 'email', 'type': 'str', 'length': 100, 'unique': True}
]
User = Table(_User)
class Sequence(Model):
table_name = 'sequence'
columns = [
{'name': 'name', 'type': 'str', 'primary': True, 'length': 20},
{'name': 'id', 'type': 'int', 'default': 0}
]
@query(autocommit=True)
def next(self, name, cur):
name = '{}:{}'.format(prefix, name)
last_id = 0
if lee_conf.use_mysql:
sql = 'INSERT INTO `sequence` (`name`) VALUES (?) ON DUPLICATE KEY UPDATE `id` = LAST_INSERT_ID(`id` + 1)'
args = (name, )
logger.debug('Query> SQL: %s | ARGS: %s'%(sql, args))
cur.execute(sql, args)
last_id = cur.lastrowid
else:
seq = self._table.find_by_id(name)
if seq:
sql = 'UPDATE `sequence` SET `id` = `id` + 1 WHERE `name` = ?'
args = (name, )
logger.debug('Query> SQL: %s | ARGS: %s'%(sql, args))
cur.execute(sql, args)
else:
self._table.save({'name': name})
seq = self._table.find_by_id(name)
last_id = seq['id']
return last_id
def save(self, name, id):
name = '{}:{}'.format(prefix, name)
return self._table.save({'name': name, 'id': id})
seq = Table(Sequence)()
|
[
"lmjubuntu@gmail.com"
] |
lmjubuntu@gmail.com
|
454d744eedb4d7ef6400ff1daf55405c7d179bc0
|
feb2ad26f596045ddccf8a36b514fb0460a37e01
|
/expression_data/data/models.py
|
dcdcdcd4988deac32f133e4a6f8e228f877dc7bc
|
[
"BSD-2-Clause"
] |
permissive
|
lamarck2008/expression-data-server
|
9a06de7bd3f69cfe92dcf9d7400715e8096d2c1c
|
7f70fd5d5a9569a315716c389f828b17a487fdbc
|
refs/heads/master
| 2021-01-16T20:24:14.289633
| 2012-11-19T02:52:06
| 2012-11-19T02:52:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,797
|
py
|
'''These models control the data saved into the database for a given experiment.
There is a generic base class named Data, which is then further subclassed into specific data models.
'''
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from genes.models import Gene
class BaseData(models.Model):
'''This is the abstract base class for all data objects.
This model contains data for a given :class:`~experiments.models.mRNASeqExperiment` or :class:`~experiments.models.MicroArrayExperiment`.
The experiment is defined by a Generic ForeignKey to one of those two :class:`~experiments.models.Experiment` objects.
'''
#These fields control the foreignkey to the experiment.
experiment_type_choices = models.Q(app_label = 'experiments', model = 'mrnaseqexperiment') | models.Q(app_label = 'experiments', model = 'microarrayexperiment')
experiment_type = models.ForeignKey(ContentType, limit_choices_to = experiment_type_choices, help_text="Experiment Type")
experiment_id = models.PositiveIntegerField()
experiment = generic.GenericForeignKey('experiment_type', 'experiment_id')
gene = models.ForeignKey(Gene, help_text="The gene for these data.")
def __unicode__(self):
'''The unicode representation is the name.'''
return "%s" % self.gene
class Meta:
'''This is an abstract model.'''
abstract = True
class GeneExperimentData(BaseData):
'''These data are for gene-level data, aggregated per experiment.
These data can be used with :class:`~experiments.models.mRNASeqExperiment` or :class:`~experiments.models.MicroArrayExperiment` experiments.
This is an extension of the abstract base model :class:`data.models.BaseData`.
The fields in this model are based on the columns in the gene_exp.diff from cufflinks. See http://cufflinks.cbcb.umd.edu/manual.html#cuffdiff_output for more details.
The required fields are **gene**, **experiment**, **fold_change**, **p_value** and **q_value**.
'''
locus = models.CharField(max_length=20, blank=True, null=True, help_text="Chromosomal location of this gene.")
internal_id = models.CharField(max_length=20, blank=True, null=True, help_text="The probe id, or internal identification code for this gene.")
sample_1 = models.CharField(max_length=20, blank=True, null=True, help_text="The name of the first group in the comparason.")
sample_2 = models.CharField(max_length=20, blank=True, null=True, help_text="The name of the second group in the comparason.")
amount_1 = models.DecimalField(max_digits=15, decimal_places=6, blank=True, null=True, help_text="The amount in the first group.")
amount_2 = models.DecimalField(max_digits=15, decimal_places=6, blank=True, null=True, help_text="The amount in the second group.")
status = models.CharField(max_length=20, blank=True, null=True, help_text="The status code of the test.")
fold_change = models.FloatField(help_text="The log(2) fold change.")
test_statistic = models.FloatField(blank=True, null=True, help_text="The value of the test statistic used to compute significance.")
p_value = models.DecimalField(max_digits=9, decimal_places=8, help_text="Unadjusted p-value.")
q_value = models.DecimalField(max_digits=9, decimal_places=8, help_text="Multiple Comparason Adjusted p-value (Typically FDR)")
significant = models.CharField(max_length=3, blank=True, null=True, help_text="Is the q-value < 0.05?")
class Meta:
'''Updated the verbose name of the datum.'''
verbose_name_plural = 'Experiment Level Data for a Gene'
verbose_name = 'Experiment Level Datum for a Gene'
|
[
"dave.bridges@gmail.com"
] |
dave.bridges@gmail.com
|
f4b7ae8e9946c91cded7fe2092eda6da7b6a3cdf
|
4090d8b4e8e9e28d620d222651c73a12a753be36
|
/contextadv/migrations/0006_alter_contextadvertisementdescription_description.py
|
d762b37198cad99a6353794de8fe7074771fc939
|
[] |
no_license
|
isaev4lex/220studio
|
91aa08f9d10ff55e98effe2542e26799efb6e2f2
|
6188403eeed7ee590b21da15c67af9e6f06ab06b
|
refs/heads/main
| 2023-08-20T07:14:18.203593
| 2021-10-31T07:24:19
| 2021-10-31T07:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
# Generated by Django 3.2.4 on 2021-08-05 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contextadv', '0005_metatags'),
]
operations = [
migrations.AlterField(
model_name='contextadvertisementdescription',
name='description',
field=models.TextField(verbose_name='Описание инструмента\n\n(для переноса строки использовать <br>)'),
),
]
|
[
"FWorld21@protonmail.com"
] |
FWorld21@protonmail.com
|
358be1517a6567c187fc0c758e6e8ce6b61d5ae6
|
0a1356b97465cc1d5c3f661f61b3b8c51fb05d46
|
/android_binding/.buildozer/android/platform/build-armeabi-v7a/build/other_builds/hostpython3/desktop/hostpython3/Tools/msi/make_zip.py
|
58f3b15ef8524e3b3487ec688380a8d5b9de0e2c
|
[
"GPL-1.0-or-later",
"Python-2.0",
"MIT"
] |
permissive
|
Rohan-cod/cross_platform_calc
|
00360f971e4da68dd36d6836c9ddbb157f6b77d5
|
5785a5e8150d174019b330c812e7eb012cc4dd79
|
refs/heads/master
| 2022-12-22T10:29:05.317051
| 2021-06-05T10:52:44
| 2021-06-05T10:52:44
| 237,465,912
| 2
| 1
|
MIT
| 2022-12-09T05:18:55
| 2020-01-31T16:07:31
|
C
|
UTF-8
|
Python
| false
| false
| 7,729
|
py
|
import argparse
import py_compile
import re
import sys
import shutil
import stat
import os
import tempfile
from itertools import chain
from pathlib import Path
from zipfile import ZipFile, ZIP_DEFLATED
TKTCL_RE = re.compile(r'^(_?tk|tcl).+\.(pyd|dll)', re.IGNORECASE)
DEBUG_RE = re.compile(r'_d\.(pyd|dll|exe|pdb|lib)$', re.IGNORECASE)
PYTHON_DLL_RE = re.compile(r'python\d\d?\.dll$', re.IGNORECASE)
DEBUG_FILES = {
'_ctypes_test',
'_testbuffer',
'_testcapi',
'_testconsole',
'_testimportmultiple',
'_testmultiphase',
'xxlimited',
'python3_dstub',
}
EXCLUDE_FROM_LIBRARY = {
'__pycache__',
'idlelib',
'pydoc_data',
'site-packages',
'tkinter',
'turtledemo',
}
EXCLUDE_FROM_EMBEDDABLE_LIBRARY = {
'ensurepip',
'venv',
}
EXCLUDE_FILE_FROM_LIBRARY = {
'bdist_wininst.py',
}
EXCLUDE_FILE_FROM_LIBS = {
'liblzma',
'python3stub',
}
EXCLUDED_FILES = {
'pyshellext',
}
def is_not_debug(p):
if DEBUG_RE.search(p.name):
return False
if TKTCL_RE.search(p.name):
return False
return p.stem.lower() not in DEBUG_FILES and p.stem.lower() not in EXCLUDED_FILES
def is_not_debug_or_python(p):
return is_not_debug(p) and not PYTHON_DLL_RE.search(p.name)
def include_in_lib(p):
name = p.name.lower()
if p.is_dir():
if name in EXCLUDE_FROM_LIBRARY:
return False
if name == 'test' and p.parts[-2].lower() == 'lib':
return False
if name in {'test', 'tests'} and p.parts[-3].lower() == 'lib':
return False
return True
if name in EXCLUDE_FILE_FROM_LIBRARY:
return False
suffix = p.suffix.lower()
return suffix not in {'.pyc', '.pyo', '.exe'}
def include_in_embeddable_lib(p):
if p.is_dir() and p.name.lower() in EXCLUDE_FROM_EMBEDDABLE_LIBRARY:
return False
return include_in_lib(p)
def include_in_libs(p):
if not is_not_debug(p):
return False
return p.stem.lower() not in EXCLUDE_FILE_FROM_LIBS
def include_in_tools(p):
if p.is_dir() and p.name.lower() in {'scripts', 'i18n', 'pynche', 'demo', 'parser'}:
return True
return p.suffix.lower() in {'.py', '.pyw', '.txt'}
BASE_NAME = 'python{0.major}{0.minor}'.format(sys.version_info)
FULL_LAYOUT = [
('/', '$build', 'python.exe', is_not_debug),
('/', '$build', 'pythonw.exe', is_not_debug),
('/', '$build', 'python{}.dll'.format(sys.version_info.major), is_not_debug),
('/', '$build', '{}.dll'.format(BASE_NAME), is_not_debug),
('DLLs/', '$build', '*.pyd', is_not_debug),
('DLLs/', '$build', '*.dll', is_not_debug_or_python),
('include/', 'include', '*.h', None),
('include/', 'PC', 'pyconfig.h', None),
('Lib/', 'Lib', '**/*', include_in_lib),
('libs/', '$build', '*.lib', include_in_libs),
('Tools/', 'Tools', '**/*', include_in_tools),
]
EMBED_LAYOUT = [
('/', '$build', 'python*.exe', is_not_debug),
('/', '$build', '*.pyd', is_not_debug),
('/', '$build', '*.dll', is_not_debug),
('{}.zip'.format(BASE_NAME), 'Lib', '**/*', include_in_embeddable_lib),
]
if os.getenv('DOC_FILENAME'):
FULL_LAYOUT.append(('Doc/', 'Doc/build/htmlhelp', os.getenv('DOC_FILENAME'), None))
if os.getenv('VCREDIST_PATH'):
FULL_LAYOUT.append(('/', os.getenv('VCREDIST_PATH'), 'vcruntime*.dll', None))
EMBED_LAYOUT.append(('/', os.getenv('VCREDIST_PATH'), 'vcruntime*.dll', None))
def copy_to_layout(target, rel_sources):
count = 0
if target.suffix.lower() == '.zip':
if target.exists():
target.unlink()
with ZipFile(str(target), 'w', ZIP_DEFLATED) as f:
with tempfile.TemporaryDirectory() as tmpdir:
for s, rel in rel_sources:
if rel.suffix.lower() == '.py':
pyc = Path(tmpdir) / rel.with_suffix('.pyc').name
try:
py_compile.compile(str(s), str(pyc), str(rel), doraise=True, optimize=2)
except py_compile.PyCompileError:
f.write(str(s), str(rel))
else:
f.write(str(pyc), str(rel.with_suffix('.pyc')))
else:
f.write(str(s), str(rel))
count += 1
else:
for s, rel in rel_sources:
dest = target / rel
try:
dest.parent.mkdir(parents=True)
except FileExistsError:
pass
if dest.is_file():
dest.chmod(stat.S_IWRITE)
shutil.copy(str(s), str(dest))
if dest.is_file():
dest.chmod(stat.S_IWRITE)
count += 1
return count
def rglob(root, pattern, condition):
dirs = [root]
recurse = pattern[:3] in {'**/', '**\\'}
while dirs:
d = dirs.pop(0)
for f in d.glob(pattern[3:] if recurse else pattern):
if recurse and f.is_dir() and (not condition or condition(f)):
dirs.append(f)
elif f.is_file() and (not condition or condition(f)):
yield f, f.relative_to(root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source', metavar='dir', help='The directory containing the repository root', type=Path)
parser.add_argument('-o', '--out', metavar='file', help='The name of the output archive', type=Path, default=None)
parser.add_argument('-t', '--temp', metavar='dir', help='A directory to temporarily extract files into', type=Path, default=None)
parser.add_argument('-e', '--embed', help='Create an embedding layout', action='store_true', default=False)
parser.add_argument('-b', '--build', help='Specify the build directory', type=Path, default=None)
ns = parser.parse_args()
source = ns.source or (Path(__file__).resolve().parent.parent.parent)
out = ns.out
build = ns.build or Path(sys.exec_prefix)
assert isinstance(source, Path)
assert not out or isinstance(out, Path)
assert isinstance(build, Path)
if ns.temp:
temp = ns.temp
delete_temp = False
else:
temp = Path(tempfile.mkdtemp())
delete_temp = True
if out:
try:
out.parent.mkdir(parents=True)
except FileExistsError:
pass
try:
temp.mkdir(parents=True)
except FileExistsError:
pass
layout = EMBED_LAYOUT if ns.embed else FULL_LAYOUT
try:
for t, s, p, c in layout:
if s == '$build':
fs = build
else:
fs = source / s
files = rglob(fs, p, c)
extra_files = []
if s == 'Lib' and p == '**/*':
extra_files.append((
source / 'tools' / 'msi' / 'distutils.command.bdist_wininst.py',
Path('distutils') / 'command' / 'bdist_wininst.py'
))
copied = copy_to_layout(temp / t.rstrip('/'), chain(files, extra_files))
print('Copied {} files'.format(copied))
if ns.embed:
with open(str(temp / (BASE_NAME + '._pth')), 'w') as f:
print(BASE_NAME + '.zip', file=f)
print('.', file=f)
print('', file=f)
print('# Uncomment to run site.main() automatically', file=f)
print('#import site', file=f)
if out:
total = copy_to_layout(out, rglob(temp, '**/*', None))
print('Wrote {} files to {}'.format(total, out))
finally:
if delete_temp:
shutil.rmtree(temp, True)
if __name__ == "__main__":
sys.exit(int(main() or 0))
|
[
"rohaninjmu@gmail.com"
] |
rohaninjmu@gmail.com
|
253b6652ddac0a3ffbcf6e0fd96dfc8abecaf9b8
|
a3bb97955ad28e8c83a23e4466bb5352ee86847d
|
/revision/apps/public/forms.py
|
9b3b57cd9930137d58592f723e09c96bb6e411bb
|
[] |
no_license
|
rosscdh/revision
|
23ac75385cca5b44032ff2637eb635fa954bb2ec
|
090fb2a82072c5570d89878c6f506dd22d5c5ed5
|
refs/heads/master
| 2016-09-05T10:53:33.652493
| 2014-11-29T10:57:41
| 2014-11-29T10:57:41
| 23,582,177
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,484
|
py
|
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse_lazy
from parsley.decorators import parsleyfy
from crispy_forms.helper import FormHelper, Layout
from crispy_forms.layout import ButtonHolder, Div, Field, Fieldset, HTML, Submit
from revision.utils import _get_unique_username
import logging
logger = logging.getLogger('django.request')
@parsleyfy
class SignUpForm(forms.Form):
username = forms.CharField(
required=False,
widget=forms.HiddenInput
)
first_name = forms.CharField(
error_messages={
'required': "First name can't be blank."
},
label='',
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'First name'})
)
last_name = forms.CharField(
error_messages={
'required': "Last name can't be blank."
},
label='',
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'Last name'})
)
email = forms.EmailField(
error_messages={
'invalid': "Email is invalid.",
'required': "Email can't be blank."
},
label='',
max_length=75,
widget=forms.EmailInput(attrs={'placeholder': 'Email address', 'autocomplete': 'off'})
)
password = forms.CharField(
error_messages={
'required': "Password can't be blank."
},
label='',
widget=forms.PasswordInput(attrs={'placeholder': 'Password'})
)
password_confirm = forms.CharField(
error_messages={
'required': "Confirm password can't be blank."
},
label='',
widget=forms.PasswordInput(attrs={'placeholder': 'Password again'})
)
t_and_c = forms.BooleanField(
error_messages={
'required': "You must agree to the Terms and Conditions."
},
initial=False,
label='I agree to the Terms and Conditions.',
required=True
)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.attrs = {
'id': 'signup-form',
'parsley-validate': ''
}
self.helper.form_show_errors = False
self.helper.layout = Layout(
HTML('{% include "partials/form-errors.html" with form=form %}'),
Fieldset(
'',
Div(
Field('first_name', css_class=''),
Field('last_name', css_class=''),
css_class='form-name clearfix'
),
Field('email'),
Field('password'),
Field('password_confirm'),
Field('t_and_c', template='partials/t_and_c.html'),
),
ButtonHolder(
Submit('submit', 'Create Account')
)
)
super(SignUpForm, self).__init__(*args, **kwargs)
# Override the label with a link to the terms (can't go higher as the urls aren't loaded yet)
self.fields['t_and_c'].label = 'I agree to the <a href="%s" target="_blank">Terms and Conditions</a>.' % reverse_lazy('public:terms')
def clean_username(self):
final_username = self.data.get('email').split('@')[0]
final_username = _get_unique_username(username=final_username)
logger.info('Username %s available' % final_username)
return final_username
def clean_password_confirm(self):
password_confirm = self.cleaned_data.get('password_confirm')
password = self.cleaned_data.get('password')
if password != password_confirm:
raise forms.ValidationError("The two password fields didn't match.")
return password_confirm
def clean_email(self):
"""
Ensure the email is normalised
"""
email = User.objects.normalize_email(self.cleaned_data.get('email'))
user = User.objects.filter(email=email).first()
if user is None:
return email
else:
#
# NOTE! We cant be specific about the email in use as a message here as
# it could be used to determine if that email address exists (which it does
# and its prety clear but making the text a bit less specific may put them off)
#
raise forms.ValidationError("Sorry, but you cant use that email address.")
def save(self):
user = User.objects.create_user(self.cleaned_data.get('username'),
self.cleaned_data.get('email'),
self.cleaned_data.get('password'),
first_name=self.cleaned_data.get('first_name'),
last_name=self.cleaned_data.get('last_name'))
return user
@parsleyfy
class SignInForm(forms.Form):
email = forms.EmailField(
error_messages={
'required': "Email can't be blank."
},
label='',
widget=forms.EmailInput(attrs={'placeholder': 'Email address'})
)
password = forms.CharField(
error_messages={
'required': "Password can't be blank."
},
label='',
widget=forms.PasswordInput(attrs={'placeholder': 'Password'})
)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.attrs = {
'parsley-validate': '',
}
self.helper.form_show_errors = False
self.helper.layout = Layout(
HTML('{% include "partials/form-errors.html" with form=form %}'),
Fieldset(
'',
Field('email', css_class='input-hg'),
Field('password', css_class='input-hg'),
),
ButtonHolder(
Submit('submit', 'Secure Sign In', css_class='btn btn-primary btn-lg')
)
)
super(SignInForm, self).__init__(*args, **kwargs)
def clean(self):
user = None
if 'email' in self.cleaned_data and 'password' in self.cleaned_data:
user = authenticate(username=self.cleaned_data['email'], password=self.cleaned_data['password'])
if user is None:
raise forms.ValidationError("Sorry, no account with those credentials was found.")
return super(SignInForm, self).clean()
|
[
"ross@lawpal.com"
] |
ross@lawpal.com
|
4e009c93c039eb04670636eb123f6a973e479fd8
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-dbss/huaweicloudsdkdbss/v1/model/batch_delete_resource_tag_request.py
|
951851e88d7c6383d31b3e128954862b7a8c1840
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,953
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchDeleteResourceTagRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'resource_type': 'str',
'resource_id': 'str',
'body': 'ResourceTagRequest'
}
attribute_map = {
'resource_type': 'resource_type',
'resource_id': 'resource_id',
'body': 'body'
}
def __init__(self, resource_type=None, resource_id=None, body=None):
"""BatchDeleteResourceTagRequest
The model defined in huaweicloud sdk
:param resource_type: 资源类型。审计:auditInstance
:type resource_type: str
:param resource_id: 资源ID
:type resource_id: str
:param body: Body of the BatchDeleteResourceTagRequest
:type body: :class:`huaweicloudsdkdbss.v1.ResourceTagRequest`
"""
self._resource_type = None
self._resource_id = None
self._body = None
self.discriminator = None
self.resource_type = resource_type
self.resource_id = resource_id
if body is not None:
self.body = body
@property
def resource_type(self):
"""Gets the resource_type of this BatchDeleteResourceTagRequest.
资源类型。审计:auditInstance
:return: The resource_type of this BatchDeleteResourceTagRequest.
:rtype: str
"""
return self._resource_type
@resource_type.setter
def resource_type(self, resource_type):
"""Sets the resource_type of this BatchDeleteResourceTagRequest.
资源类型。审计:auditInstance
:param resource_type: The resource_type of this BatchDeleteResourceTagRequest.
:type resource_type: str
"""
self._resource_type = resource_type
@property
def resource_id(self):
"""Gets the resource_id of this BatchDeleteResourceTagRequest.
资源ID
:return: The resource_id of this BatchDeleteResourceTagRequest.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""Sets the resource_id of this BatchDeleteResourceTagRequest.
资源ID
:param resource_id: The resource_id of this BatchDeleteResourceTagRequest.
:type resource_id: str
"""
self._resource_id = resource_id
@property
def body(self):
"""Gets the body of this BatchDeleteResourceTagRequest.
:return: The body of this BatchDeleteResourceTagRequest.
:rtype: :class:`huaweicloudsdkdbss.v1.ResourceTagRequest`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this BatchDeleteResourceTagRequest.
:param body: The body of this BatchDeleteResourceTagRequest.
:type body: :class:`huaweicloudsdkdbss.v1.ResourceTagRequest`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            # Force UTF-8 default encoding so non-ASCII fields serialize on Python 2.
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchDeleteResourceTagRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
90ce17400257d8e886aa3c49973efb6bbe7e3d0f
|
8830831a87f35ff2628f379d8230928ec6b5641a
|
/BNPParibas/code/gbc_deviance.py
|
f947f44609ebf50d5d1c3aa5f5f6442aa072e2f5
|
[] |
no_license
|
nickmcadden/Kaggle
|
e5882c9d68a81700d8d969328d91c059a0643868
|
cbc5347dec90e4bf64d4dbaf28b8ffb362efc64f
|
refs/heads/master
| 2019-07-18T08:09:40.683168
| 2018-01-26T14:35:38
| 2018-01-26T14:35:38
| 40,735,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,263
|
py
|
import sys
import pandas as pd
import numpy as np
import scipy as sp
import xgboost as xgb
import data
import argparse
import pickle as pkl
from scipy import stats
from collections import OrderedDict
from sklearn.utils import shuffle
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.utils import shuffle
def log_loss(act, pred):
	""" Vectorised computation of logloss.

	Clips predictions to [epsilon, 1 - epsilon] so the logarithm is finite.
	FIX: the original used scipy's top-level NumPy aliases (sp.maximum,
	sp.log, sp.subtract, sp.minimum), which were deprecated in SciPy 1.0 and
	later removed; the NumPy equivalents below are numerically identical.
	"""
	epsilon = 1e-15
	pred = np.clip(pred, epsilon, 1 - epsilon)
	ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
	return -ll / len(act)
# Command-line configuration for the run.
parser = argparse.ArgumentParser(description='XGBoost for BNP')
parser.add_argument('-f','--n_features', help='Number of features', type=int, default=1000)
parser.add_argument('-n','--n_rounds', help='Number of iterations', type=int, default=350)
parser.add_argument('-e','--eta', help='Learning rate', type=float, default=0.0125)
parser.add_argument('-r','--r_seed', help='Set random seed', type=int, default=3)
parser.add_argument('-b','--minbin', help='Minimum categorical bin size', type=int, default=1)
parser.add_argument('-ct','--cat_trans', help='Category transformation method', type=str, default='std')
parser.add_argument('-cv','--cv', action='store_true')
parser.add_argument('-codetest','--codetest', action='store_true')
parser.add_argument('-getcached', '--getcached', action='store_true')
parser.add_argument('-extra', '--extra', action='store_true')
m_params = vars(parser.parse_args())

# Load data
X, y, X_sub, ids = data.load(m_params)

print("BNP Parabas: classification...\n")
clf = GradientBoostingClassifier(loss='deviance', learning_rate=m_params['eta'], n_estimators=m_params['n_rounds'], subsample=1, max_features= 35, min_samples_split= 4, max_depth = 12, min_samples_leaf= 2, verbose=2, random_state=1)

if m_params['cv']:
	# do cross validation scoring: 4-fold out-of-bag predictions plus an
	# averaged submission prediction per fold
	kf = KFold(X.shape[0], n_folds=4, shuffle=True, random_state=1)
	scr = np.zeros([len(kf)])
	oob_pred = np.zeros(X.shape[0])
	sub_pred = np.zeros((X_sub.shape[0], 4))
	for i, (tr_ix, val_ix) in enumerate(kf):
		clf.fit(X[tr_ix], y[tr_ix])
		pred = clf.predict_proba(X[val_ix])
		oob_pred[val_ix] = np.array(pred[:,1])
		sub_pred[:,i] = clf.predict_proba(X_sub)[:,1]
		scr[i] = log_loss(y[val_ix], np.array(pred[:,1]))
		print('Train score is:', scr[i])
	print(log_loss(y, oob_pred))
	# FIX: was a Python-2-only bare `print` statement; the function-call form
	# behaves identically on Python 2 and 3.
	print(oob_pred[1:10])
	sub_pred = sub_pred.mean(axis=1)
	# Persist out-of-bag and submission predictions keyed by CV score.
	oob_pred_filename = '../output/oob_pred_gbcdeviance_' + str(np.mean(scr))
	sub_pred_filename = '../output/sub_pred_gbcdeviance_' + str(np.mean(scr))
	pkl.dump(oob_pred, open(oob_pred_filename + '.p', 'wb'))
	pkl.dump(sub_pred, open(sub_pred_filename + '.p', 'wb'))
	preds = pd.DataFrame({"ID": ids, "PredictedProb": sub_pred})
	preds.to_csv(sub_pred_filename + '.csv', index=False)
else:
	X, y = shuffle(X, y)
	# Train on full data
	print("Training on full data")
	clf.fit(X,y)
	print("Creating predictions")
	pred = clf.predict_proba(X_sub)
	print("Saving Results.")
	model_name = '../output/pred_gbcdev_' + str(m_params['n_rounds'])
	preds = pd.DataFrame({"ID": ids, "PredictedProb": pred[:,1]})
	preds.to_csv(model_name + '.csv', index=False)
|
[
"nmcadden@globalpersonals.co.uk"
] |
nmcadden@globalpersonals.co.uk
|
50b0d0d43f43bcda2ef5a05062a45b32b719010f
|
4bd5cdb67fdd6a6f0ceb3af025ceaf977b661273
|
/gconv_experiments/groupy/garray/D4h_array.py
|
1c0b4b98530c21dfa94b6937ccf955673ddf5fa0
|
[] |
no_license
|
andreiqv/gconv
|
93d7d313cdc78e2bfefd53820918293526fc4680
|
23f9ec62b119c64cc87f8727cc1e409a469db0f1
|
refs/heads/master
| 2020-05-07T21:05:28.840973
| 2019-04-11T23:25:31
| 2019-04-11T23:25:31
| 180,890,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,376
|
py
|
import numpy as np
from groupy.garray.finitegroup import FiniteGroup
from groupy.garray.matrix_garray import MatrixGArray
from groupy.garray.D4ht_array import D4htArray
from groupy.garray.Z3_array import Z3Array
"""
Implementation of dihedral finite group D4h, consisting of 16 elements in total.
These are the elements of C4h, with added reflection.
Int parameterisation contains an extra parameter, m (in {0, 1}) to represent this reflection.
"""
class D4hArray(MatrixGArray):
    """GArray of elements of the dihedral group D4h.

    D4h has 16 elements: the 8 elements of C4h, each with an optional point
    reflection, encoded in the int parameterization as (y, z, m) with
    y in {0, 1} (180-degree y-rotations), z in {0..3} (90-degree z-rotations)
    and m in {0, 1} (reflection flag).
    """

    parameterizations = ['int', 'mat', 'hmat']
    _g_shapes = {'int': (3,), 'mat': (3, 3), 'hmat': (4, 4)}
    _left_actions = {}
    _reparameterizations = {}
    _group_name = 'D4h'

    def __init__(self, data, p='int'):
        data = np.asarray(data)
        # FIX: np.int was a deprecated alias for the builtin int and was
        # removed in NumPy >= 1.24; np.dtype(int) is the equivalent check.
        assert data.dtype == np.dtype(int)
        # classes OArray can be multiplied with
        self._left_actions[D4hArray] = self.__class__.left_action_hmat
        self._left_actions[D4htArray] = self.__class__.left_action_hmat
        self._left_actions[Z3Array] = self.__class__.left_action_vec
        super(D4hArray, self).__init__(data, p)
        self.elements = self.get_elements()

    def mat2int(self, mat_data):
        '''
        Transforms 3x3 matrix representation to int representation.
        To handle any size and shape of mat_data, the original mat_data
        is reshaped to a long list of 3x3 matrices, converted to a list of
        int representations, and reshaped back to the original mat_data shape.

        mat-2-int is achieved by taking the matrix, and looking up whether it
        exists in the element list. If not, the matrix should be multiplied with -1
        to retrieve the reflection. The resulting matrix can be looked up in the
        element list, and that index can be converted to y and z.
        '''
        input = mat_data.reshape((-1, 3, 3))
        data = np.zeros((input.shape[0], 3), dtype=int)
        # FIX: range() replaces the Python-2-only xrange().
        for i in range(input.shape[0]):
            mat = input[i]
            # check for reflection
            if mat.tolist() not in self.elements:
                mat = np.array(mat) * -1
                data[i, 2] = 1
            # determine z and y
            index = self.elements.index(mat.tolist())
            z = int(index % 4)
            y = int((index - z) / 4)
            data[i, 0] = y
            data[i, 1] = z
        data = data.reshape(mat_data.shape[:-2] + (3,))
        return data

    def int2mat(self, int_data):
        '''
        Transforms integer representation to 3x3 matrix representation.
        Original int_data is flattened and later reshaped back to its original
        shape to handle any size and shape of input.
        '''
        # rotations over y, z and reflection
        y = int_data[..., 0].flatten()
        z = int_data[..., 1].flatten()
        m = int_data[..., 2].flatten()
        data = np.zeros((len(y),) + (3, 3), dtype=int)
        for j in range(len(y)):
            index = (y[j] * 4) + z[j]
            mat = self.elements[index]
            mat = np.array(mat) * ((-1) ** m[j])  # mirror if reflection
            data[j, 0:3, 0:3] = mat.tolist()
        data = data.reshape(int_data.shape[:-1] + (3, 3))
        return data

    def _multiply(self, element, generator, times):
        '''
        Helper function to multiply an _element_ with a _generator_
        _times_ number of times.
        '''
        element = np.array(element)
        for i in range(times):
            element = np.dot(element, np.array(generator))
        return element

    def get_elements(self):
        '''
        Function to generate a list containing elements of group D4h,
        similar to get_elements() of BArray.
        Elements are stored as lists rather than numpy arrays to enable
        lookup through self.elements.index(x).
        '''
        # specify generators
        g1 = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])  # 180 degrees over y
        g2 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])  # 90 degrees over z
        element_list = []
        element = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])  # starting point = identity matrix
        for i in range(0, 2):
            element = self._multiply(element, g1, i)
            for j in range(0, 4):
                element = self._multiply(element, g2, j)
                element_list.append(element.tolist())
        return element_list
class D4hGroup(FiniteGroup, D4hArray):
    """The finite group D4h, containing all 16 (y, z, m) elements."""

    def __init__(self):
        # FIX: range() replaces the Python-2-only xrange().
        D4hArray.__init__(
            self,
            data=np.array([[i, j, m] for i in range(2) for j in range(4) for m in range(2)]),
            p='int'
        )
        FiniteGroup.__init__(self, D4hArray)

    def factory(self, *args, **kwargs):
        # Build new member arrays of this group.
        return D4hArray(*args, **kwargs)
# Singleton instance of the full D4h group.
D4h = D4hGroup()
def rand(size=()):
    '''
    Returns an D4hArray of shape size, with randomly chosen elements in int parameterization.
    '''
    # FIX: dtype=int replaces np.int, which was removed in NumPy >= 1.24.
    data = np.zeros(size + (3,), dtype=int)
    data[..., 0] = np.random.randint(0, 2, size)  # y-rotation index
    data[..., 1] = np.random.randint(0, 4, size)  # z-rotation index
    data[..., 2] = np.random.randint(0, 2, size)  # reflection flag
    return D4hArray(data=data, p='int')
def identity(p='int'):
    '''
    Returns the identity element: a matrix with 1's on the diagonal.
    '''
    li = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    # FIX: dtype=int replaces np.int, which was removed in NumPy >= 1.24.
    e = D4hArray(data=np.array(li, dtype=int), p='mat')
    return e.reparameterize(p)
|
[
"phxv@mail.ru"
] |
phxv@mail.ru
|
ef955cf11a1cd96660828ba53df533af7add7417
|
a9b5bc48a89329aa44cb4dd63ce47a3c0dfc90ba
|
/tests/test_withings_object.py
|
8fa9c36a34257583ebac5c15851c1621aa312ca8
|
[
"MIT"
] |
permissive
|
randi120/python-withings
|
d050a263f5c500ad258072dbb3661a43dd225de3
|
016bb3cc2d62a4e2813df422829eba21530570bc
|
refs/heads/master
| 2021-01-22T13:47:49.355343
| 2014-12-26T00:47:12
| 2014-12-26T00:47:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
import time
import unittest
from datetime import datetime
from withings import WithingsObject
class TestWithingsObject(unittest.TestCase):
    """Unit tests for attribute mapping on WithingsObject."""

    def test_attributes(self):
        # Plain attributes are mirrored; ISO date strings become datetimes.
        payload = {
            "date": "2013-04-10",
            "string": "FAKE_STRING",
            "integer": 55555,
            "float": 5.67
        }
        model = WithingsObject(payload)
        self.assertEqual(datetime.strftime(model.date, '%Y-%m-%d'), payload['date'])
        self.assertEqual(model.string, payload['string'])
        self.assertEqual(model.integer, payload['integer'])
        self.assertEqual(model.float, payload['float'])

        # Epoch timestamps round-trip through the parsed datetime.
        payload = {"date": 1409596058}
        model = WithingsObject(payload)
        self.assertEqual(time.mktime(model.date.timetuple()), payload['date'])

        # Unparseable date strings are kept verbatim.
        payload = {"date": "weird and wacky date format"}
        model = WithingsObject(payload)
        self.assertEqual(model.date, payload['date'])
|
[
"bradpitcher@gmail.com"
] |
bradpitcher@gmail.com
|
cc1454d122573184c132666c2fe8f7e97e045849
|
d8416cd4c8f532809c4c9d368d43fa773b3b198c
|
/torchsupport/flex/examples/cifar_tdre.py
|
546881ac3c571f5f93a027d84caa06030768d4c4
|
[
"MIT"
] |
permissive
|
DavidMetzIMT/torchsupport
|
a53a0d532b7542d81dc158d3d67f195cbce86bf9
|
a0ca719c820a4895e98091c52e43c5300e1a71a3
|
refs/heads/master
| 2023-05-28T21:45:09.302210
| 2021-06-14T17:30:58
| 2021-06-14T17:30:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,151
|
py
|
from functools import partial
from torchsupport.data.namedtuple import namespace
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.distributions import Normal
from torch.utils.data import Dataset
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
from torchsupport.modules import ReZero
from torchsupport.training.samplers import Langevin
from torchsupport.utils.argparse import parse_options
from torchsupport.flex.log.log_types import LogImage
from torchsupport.flex.context.context import TrainingContext
from torchsupport.flex.data_distributions.data_distribution import DataDistribution
from torchsupport.flex.tasks.energy.density_ratio import direct_mixing, noise_contrastive_estimation, probability_surface_estimation, random_dim_mixing, tdre_mixing, tdre_step, tnce_step, independent_mixing, vp_mixing
from torchsupport.flex.training.density_ratio import telescoping_density_ratio_training
def valid_callback(args, ctx: TrainingContext=None):
    """Log the validation batch, plus the samples grouped by predicted class."""
    ctx.log(images=LogImage(args.sample))
    predicted_classes = args.prediction.argmax(dim=1)
    for class_idx in range(10):
        members = args.sample[predicted_classes == class_idx]
        if members.size(0) > 0:
            ctx.log(**{f"classified {class_idx}": LogImage(members)})
def generate_step(energy, base, integrator: Langevin=None, ctx=None):
    """Anneal base-distribution samples down through the noise levels and log them.

    Starts from fresh base samples and runs the Langevin integrator once per
    noise level, from 1.0 down to 0.0, tethering each step to the previous
    sample via ConditionalEnergy.
    """
    sample = base.sample(ctx.batch_size)
    # FIX: derive the device from the sample instead of the module-level `opt`
    # global, which only exists when this file is executed as a script.
    levels = torch.arange(0.0, 1.0, 0.01, device=sample.device)
    for level in reversed(levels):
        this_level = level * torch.ones(sample.size(0), device=sample.device)
        sample = integrator.integrate(
            ConditionalEnergy(energy, sample, shift=0.025), sample, this_level, None
        )
    # Map from [-1, 1] model space back to [0, 1] image space for logging.
    result = ((sample + 1) / 2).clamp(0, 1)
    ctx.log(samples=LogImage(result))
class CIFAR10Dataset(Dataset):
    """Wraps a CIFAR10-style dataset: adds dequantization noise, rescales to [-1, 1]."""

    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        image, _ = self.data[index]
        # Dequantization noise at 1/255 scale, then map [0, 1] -> [-1, 1].
        image = image + torch.randn_like(image) / 255
        return 2 * image - 1, []

    def __len__(self):
        return len(self.data)
class Base(nn.Module):
    """Trainable diagonal-Gaussian base distribution over 3x32x32 images."""

    def __init__(self):
        super().__init__()
        # Per-channel mean and log-std, broadcast over the spatial dimensions.
        self.mean = nn.Parameter(torch.zeros(3, 1, 1))
        self.logv = nn.Parameter(torch.zeros(3, 1, 1))

    def sample(self, batch_size):
        dist = Normal(
            self.mean.expand(3, 32, 32),
            self.logv.exp().expand(3, 32, 32)
        )
        # NOTE: the learned distribution is currently bypassed; samples are
        # drawn from a standard normal (the rsample call is commented out).
        return torch.randn(batch_size, 3, 32, 32, device=self.mean.device)#dist.rsample(sample_shape=(batch_size,))

    def log_prob(self, data, condition):
        # Zero log-density with the same shape/device as forward's first output.
        return torch.zeros_like(self(data, condition)[0])

    def forward(self, data, condition):
        dist = Normal(self.mean, self.logv.exp())
        per_pixel = dist.log_prob(data)
        flattened = per_pixel.view(*per_pixel.shape[:-3], -1)
        # Per-sample total log-probability, plus the distribution for callers.
        return flattened.sum(dim=-1, keepdim=True), namespace(
            distribution=dist
        )
class SineEmbedding(nn.Module):
    """Embed a scalar per-sample value through sine-activated linear layers."""

    def __init__(self, size, depth=2):
        super().__init__()
        layers = [nn.Linear(1, size)]
        layers += [nn.Linear(size, size) for _ in range(depth - 1)]
        self.blocks = nn.ModuleList(layers)

    def forward(self, time):
        out = time[:, None]  # (batch,) -> (batch, 1)
        for layer in self.blocks:
            out = layer(out).sin()
        return out
class ResBlock(nn.Module):
    """Residual conv block with sine time-conditioning and a ReZero gate."""

    def __init__(self, size):
        super().__init__()
        # Each embedding produces a (scale, bias) pair of width `size`.
        self.condify = SineEmbedding(2 * size)
        self.skip = SineEmbedding(2 * size)
        self.blocks = nn.ModuleList([
            nn.Conv2d(size, size, 3, padding=1)
            for _ in range(2)
        ])
        self.zero = ReZero(size)

    def forward(self, inputs, levels):
        def _affine(embedding):
            # (batch, 2*size) -> two (batch, size, 1, 1) modulation tensors.
            params = embedding(levels)
            params = params.view(*params.shape, 1, 1)
            return params.chunk(2, dim=1)

        scale, bias = _affine(self.condify)
        skip_scale, skip_bias = _affine(self.skip)
        residual = func.silu(self.blocks[0](inputs))
        residual = scale * residual + bias
        residual = self.blocks[1](residual)
        gated_inputs = skip_scale * inputs + skip_bias
        return self.zero(gated_inputs, residual)
class Energy(nn.Module):
    """Convolutional energy network producing one scalar energy per image."""

    def __init__(self, base):
        super().__init__()
        self.base = base
        channel_sizes = [(3, 32), (32, 64), (64, 128), (128, 256)]
        self.conv = nn.ModuleList([
            nn.Conv2d(c_in, c_out, 3, padding=1) for c_in, c_out in channel_sizes
        ])
        self.res = nn.ModuleList([
            ResBlock(c_out) for _, c_out in channel_sizes
        ])
        # Quadratic + linear readout on the pooled feature vector.
        self.W = nn.Linear(256, 256)
        self.b = nn.Linear(256, 1)

    def forward(self, inputs, levels, *args):
        hidden = inputs
        for res_block, conv in zip(self.res, self.conv):
            hidden = func.silu(conv(hidden))
            hidden = res_block(hidden, levels)
            # Scaled average pooling keeps activation magnitude as the map shrinks.
            hidden = 2 ** 2 * func.avg_pool2d(hidden, 2)
        features = hidden.size(-1) ** 2 * func.adaptive_avg_pool2d(hidden, 1)
        features = features.view(features.size(0), -1)
        quadratic = (features * self.W(features)).sum(dim=1, keepdim=True)
        linear = self.b(features)
        return quadratic + linear
class TotalEnergy(nn.Module):
    """Sums a per-level energy over a fixed set of noise levels for each sample.

    Evaluates `energy` on every (sample, level) pair and returns, per input
    sample, the sum of its energies over all levels, shaped (batch, 1).
    """

    def __init__(self, energy, levels):
        super().__init__()
        self.energy = energy
        self.levels = levels

    def forward(self, data: torch.Tensor, *args):
        n_levels = len(self.levels)
        # repeat_interleave lays pairs out sample-major:
        # [s0 l0, s0 l1, ..., s1 l0, s1 l1, ...]
        inputs = data.repeat_interleave(n_levels, dim=0)
        levels = torch.cat(data.size(0) * [self.levels], dim=0)
        factors = self.energy(inputs, levels, *args)
        # BUG FIX: factors are sample-major, so reduce over the level axis of a
        # (batch, n_levels, 1) view. The previous view(-1, batch, 1).sum(dim=0)
        # assumed a level-major layout and summed energies across different
        # samples whenever batch != position pattern lined up.
        result = factors.view(data.size(0), n_levels, 1).sum(dim=1)
        return result
class ConditionalEnergy(nn.Module):
    """Energy plus a Gaussian tether keeping samples near a fixed origin."""

    def __init__(self, energy, origin, shift=0.025):
        super().__init__()
        self.energy = energy
        self.origin = origin.detach()
        self.shift = shift

    def forward(self, data, level, *args):
        base_energy = self.energy(data, level)
        # Mean per-element log-density of `data` under N(origin, shift).
        tether = Normal(self.origin, self.shift).log_prob(data)
        tether = tether.view(tether.size(0), -1).mean(dim=1, keepdim=True)
        return base_energy + tether
if __name__ == "__main__":
    # Run configuration: output path, device and schedule for the TDRE run.
    opt = parse_options(
        "CIFAR10 EBM using TNCE in flex.",
        path="flexamples/cifar10-tdre-10",
        device="cuda:0",
        batch_size=8,
        max_epochs=1000,
        report_interval=1000
    )
    # Data pipeline: CIFAR10 -> dequantized [-1, 1] tensors -> batched distribution.
    cifar10 = CIFAR10("examples/", download=False, transform=ToTensor())
    data = CIFAR10Dataset(cifar10)
    data = DataDistribution(
        data, batch_size=opt.batch_size,
        device=opt.device
    )
    base = Base().to(opt.device)
    energy = Energy(base).to(opt.device)
    # Noise levels used by the telescoping density-ratio objective.
    levels = torch.arange(0.0, 1.0, 0.01, device=opt.device)
    training = telescoping_density_ratio_training(
        energy, base, data,
        mixing=partial(
            independent_mixing,
            mixing=tdre_mixing,
            levels=levels
        ),
        optimizer_kwargs=dict(lr=1e-4),
        telescoping_step=tdre_step,
        train_base=False,
        path=opt.path,
        device=opt.device,
        batch_size=opt.batch_size,
        max_epochs=opt.max_epochs,
        report_interval=opt.report_interval
    )
    # add generating images every few steps:
    integrator = Langevin(
        rate=-0.01, noise=0.01,
        steps=5, max_norm=None,
        clamp=(-1, 1)
    )
    training.add(
        generate_step=partial(
            generate_step, energy=energy,
            base=base, integrator=integrator,
            ctx=training
        ),
        every=opt.report_interval
    )
    # training.get_step("tdre_step").extend(
    #   lambda args, ctx=None:
    #     ctx.log(real_images=LogImage(args.real_data.clamp(0, 1)))
    # )
    # Resume from any saved checkpoint, then train.
    training.load()
    training.train()
|
[
"jendrusch@stud.uni-heidelberg.de"
] |
jendrusch@stud.uni-heidelberg.de
|
efdfe5a0a9fd4511946056b84590b1ff8569b14c
|
4e7669f4234dbbcc6ef8206ac43bba33c53b8d1e
|
/Predictions/DataProcessing.py
|
73ecbdb3270823e4d440ea305f64b9d0f26fce93
|
[] |
no_license
|
chouhansolo/edbn
|
46cadbcb8d4e079cee746868663379b5b825286b
|
63cfcd7e5e5e17242aed3b1968119e85b2796015
|
refs/heads/master
| 2023-04-05T06:54:26.380161
| 2021-04-21T08:50:30
| 2021-04-21T08:50:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,175
|
py
|
"""
Author: Stephen Pauwels
"""
import os
import pickle
import pandas as pd
from RelatedMethods.Camargo.support_modules.role_discovery import role_discovery
from Utils.LogFile import LogFile
BPIC15 = "BPIC15"
BPIC15_1 = "BPIC15_1"
BPIC15_2 = "BPIC15_2"
BPIC15_3 = "BPIC15_3"
BPIC15_4 = "BPIC15_4"
BPIC15_5 = "BPIC15_5"
BPIC12 = "BPIC12"
BPIC12W = "BPIC12W"
HELPDESK = "HELPDESK"
BPIC18 = "BPIC18"
LOGFILE_PATH = "../Data/Logfiles"
def preprocess(logfile, add_end, reduce_tasks, resource_pools, resource_attr, remove_resource):
    """Apply optional preprocessing steps to a LogFile and convert it to ints.

    Steps (each controlled by a flag):
      - resource_pools: discover resource roles and attach them as a "role" column
      - add_end: wrap each case with synthetic "start" and "end" events
      - reduce_tasks: collapse consecutive duplicate (activity, resource) events
    """
    # Discover Roles
    if resource_pools and resource_attr is not None:
        resources, resource_table = role_discovery(logfile.get_data(), resource_attr, 0.5)
        log_df_resources = pd.DataFrame.from_records(resource_table)
        log_df_resources = log_df_resources.rename(index=str, columns={"resource": resource_attr})
        print(logfile.data)
        # Join the discovered role per resource onto the event log.
        logfile.data = logfile.data.merge(log_df_resources, on=resource_attr, how='left')
        logfile.categoricalAttributes.add("role")
        if remove_resource:
            logfile.data = logfile.data.drop([resource_attr], axis=1)
        # Downstream steps address the resource via the discovered "role" column.
        resource_attr = "role"
    else:
        # No role discovery: treat the raw resource column as the "role".
        # NOTE(review): resource_attr keeps its old name here — confirm that
        # downstream lookups still resolve after the rename.
        logfile.data = logfile.data.rename(columns={resource_attr: "role"})
        logfile.categoricalAttributes.add("role")
        print(logfile.data)

    if add_end:
        cases = logfile.get_cases()
        new_data = []
        for case_name, case in cases:
            # Synthetic "start" event carrying the case id.
            record = {}
            for col in logfile.data:
                if col == logfile.trace:
                    record[col] = case_name
                else:
                    record[col] = "start"
            new_data.append(record)
            for i in range(0, len(case)):
                new_data.append(case.iloc[i].to_dict())
            # Synthetic "end" event carrying the case id.
            record = {}
            for col in logfile.data:
                if col == logfile.trace:
                    record[col] = case_name
                else:
                    record[col] = "end"
            new_data.append(record)
        logfile.data = pd.DataFrame.from_records(new_data)

    # Check for dublicate events with same resource
    if reduce_tasks and resource_attr is not None:
        cases = logfile.get_cases()
        reduced = []
        for case_name, case in cases:
            reduced.append(case.iloc[0].to_dict())
            current_trace = [case.iloc[0][[logfile.activity, resource_attr]].values]
            for i in range(1, len(case)):
                # Keep an event only when it differs from the previous
                # (activity, resource) pair; identical repeats are dropped.
                if case.iloc[i][logfile.activity] == current_trace[-1][0] and \
                        case.iloc[i][resource_attr] == current_trace[-1][1]:
                    pass
                else:
                    current_trace.append(case.iloc[i][[logfile.activity, resource_attr]].values)
                    reduced.append(case.iloc[i].to_dict())
        logfile.data = pd.DataFrame.from_records(reduced)
        print("Removed duplicated events")

    # Encode all categorical values as integers for downstream models.
    logfile.convert2int()
    return logfile
def get_data(dataset, dataset_size, k, add_end, reduce_tasks, resource_pools, remove_resource):
    """Load and preprocess an event log, caching the result on disk.

    The cache key encodes the dataset name, dataset_size, k and the four
    boolean preprocessing flags. Returns a tuple of the preprocessed LogFile
    and the cache-file basename, or None for an unknown dataset name.
    """
    filename_parts = [dataset, str(dataset_size), str(k)]
    for v in [add_end, reduce_tasks, resource_pools, remove_resource]:
        if v:
            filename_parts.append(str(1))
        else:
            filename_parts.append(str(0))
    print(filename_parts)
    cache_file = LOGFILE_PATH + "/" + "_".join(filename_parts)

    if os.path.exists(cache_file):
        print("Loading file from cache")
        with open(cache_file, "rb") as pickle_file:
            preprocessed_log = pickle.load(pickle_file)
    else:
        # Per-dataset configuration replaces the previous repetitive elif chain:
        # (csv path, time attr, case attr, activity attr,
        #  resource attr (None if absent), third kept column, min case length or None)
        bpic15 = ("Complete Timestamp", "Case ID", "Activity", "Resource", "Resource", 5)
        configs = {
            BPIC15:   ("../Data/BPIC15_1_sorted_new.csv",) + bpic15,
            BPIC15_1: ("../Data/BPIC15_1_sorted_new.csv",) + bpic15,
            BPIC15_2: ("../Data/BPIC15_2_sorted_new.csv",) + bpic15,
            BPIC15_3: ("../Data/BPIC15_3_sorted_new.csv",) + bpic15,
            BPIC15_4: ("../Data/BPIC15_4_sorted_new.csv",) + bpic15,
            BPIC15_5: ("../Data/BPIC15_5_sorted_new.csv",) + bpic15,
            BPIC12:   ("../Data/BPIC12.csv", "completeTime", "case", "event", "org:resource", "org:resource", 5),
            BPIC12W:  ("../Data/BPIC12W.csv", "completeTime", "case", "event", "org:resource", "org:resource", 5),
            HELPDESK: ("../Data/Helpdesk.csv", "completeTime", "case", "event", "Resource", "Resource", 3),
            BPIC18:   ("../Data/BPIC18.csv", "startTime", "case", "event", None, "subprocess", None),
        }
        if dataset not in configs:
            print("Unknown Dataset")
            return None
        file, time_attr, case_attr, act_attr, resource_attr, third_col, min_length = configs[dataset]
        logfile = LogFile(file, ",", 0, dataset_size, time_attr, case_attr,
                          activity_attr=act_attr, convert=False, k=k)
        logfile.keep_attributes([case_attr, act_attr, third_col])
        if min_length is not None:
            logfile.filter_case_length(min_length)

        preprocessed_log = preprocess(logfile, add_end, reduce_tasks, resource_pools, resource_attr, remove_resource)
        preprocessed_log.create_k_context()
        with open(cache_file, "wb") as pickle_file:
            pickle.dump(preprocessed_log, pickle_file)
    return preprocessed_log, "_".join(filename_parts)
def calc_charact():
    """Print summary statistics for every supported event log."""
    import numpy as np
    print("Calculating characteristics")
    datasets = [BPIC12, BPIC12W, BPIC15_1, BPIC15_2, BPIC15_3, BPIC15_4, BPIC15_5, HELPDESK]
    for dataset in datasets:
        # Load without any preprocessing flags except removing the raw resource.
        logfile, name = get_data(dataset, 20000000, 0, False, False, False, True)
        cases = logfile.get_cases()
        case_lengths = [len(case) for _, case in cases]
        print("Logfile:", name)
        print("Num events:", len(logfile.get_data()))
        print("Num cases:", len(cases))
        print("Num activities:", len(logfile.get_data()[logfile.activity].unique()))
        print("Avg activities in case:", np.average(case_lengths))
        print("Max activities in case:", max(case_lengths))
        print()


if __name__ == "__main__":
    calc_charact()
|
[
"stephen.pauwels@uantwerpen.be"
] |
stephen.pauwels@uantwerpen.be
|
240b97aea52be8a26c3a5cf1be0c510ebeff50e0
|
bff37773d1e6c3f4bf8ae4eaa64d7a2d563ecf68
|
/backend/users/migrations/0002_auto_20201217_0711.py
|
46bfcb7d1b2be14df2fe21ac2b64e683539ccceb
|
[] |
no_license
|
crowdbotics-apps/mobile-17-dec-dev-16856
|
d405478f85248047e00ed97cd4b61fa5ca2a8fd6
|
b5c60c39b4e6715b17fa1e7dff6c72527f6ae967
|
refs/heads/master
| 2023-02-03T00:16:51.489994
| 2020-12-17T14:12:46
| 2020-12-17T14:12:46
| 322,203,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 2.2.17 on 2020-12-17 07:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow the user's `name` field to be blank or NULL (max length 255)."""

    dependencies = [
        ("users", "0001_initial"),
    ]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="name",
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
2c705f84317dd7fb1c449692c21c19157c862a5f
|
e87d793b3a5facc6e54e0263fbd67703e1fbb382
|
/duckietown-world-venv/bin/jupyter-trust
|
b408b0d21630fc79d1e9443daa6b2a05bc45d37c
|
[] |
no_license
|
llingg/behaviour-benchmarking
|
a860bbe709309e13f3e1133d916944882199a40f
|
85bbf1a9c2c628ba74480fe7abac3804d6afdac4
|
refs/heads/v1
| 2022-10-06T08:21:29.068329
| 2020-06-11T07:02:46
| 2020-06-11T07:02:46
| 259,622,704
| 0
| 0
| null | 2020-06-02T17:52:46
| 2020-04-28T11:52:08
|
C++
|
UTF-8
|
Python
| false
| false
| 302
|
#!/home/linuslingg/duckietown-world/duckietown-world-venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from nbformat.sign import TrustNotebookApp
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes (-script.pyw / .exe) so argv[0]
    # shows the plain command name in help and error output.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(TrustNotebookApp.launch_instance())
|
[
"linggl@student.ethz.ch"
] |
linggl@student.ethz.ch
|
|
693351d13cbca26f5dc2f674b07e879c28cc09eb
|
b3a55844de9ff46972448b56ccadc1e3088adae1
|
/poptimizer/data/views/go.py
|
8e124ea774a85d74b096f602bcad3c5d32f544ed
|
[
"Unlicense"
] |
permissive
|
tjlee/poptimizer
|
480a155e2f4ffd5d6eda27323c5baa682d7f9f00
|
3a67544fd4c1bce39d67523799b76c9adfd03969
|
refs/heads/master
| 2023-08-15T10:16:11.161702
| 2021-10-15T15:35:38
| 2021-10-15T15:35:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
"""Предварительная версия интеграции с Go."""
import aiohttp
from bson import json_util
from poptimizer.shared import connections
async def rest_reader(session: aiohttp.ClientSession = connections.HTTP_SESSION):
    """Fetch trading dates from the local Go REST service.

    Raises aiohttp.ClientResponseError on non-2xx status; decodes the body
    with BSON's json_util to restore extended-JSON types (e.g. dates).
    """
    async with session.get("http://localhost:3000/trading_dates/trading_dates") as respond:
        respond.raise_for_status()
        json = await respond.text()
        return json_util.loads(json)
if __name__ == "__main__":
    import asyncio

    # asyncio.run replaces the deprecated get_event_loop()/run_until_complete
    # pattern and guarantees the event loop is closed afterwards.
    print(asyncio.run(rest_reader()))
|
[
"wlmike@gmail.com"
] |
wlmike@gmail.com
|
f611a878a16540a8544d96b179da3dbe91d2edf7
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/INTERVIEW-PREP-COMPLETE/notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/Project Euler/Problem 04/sol1.py
|
ba8a39290c9cd8d45a5050c08b2e276e81e6c6f9
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
"""
Problem:
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 x 99.
Find the largest palindrome made from the product of two 3-digit numbers which is less than N.
"""
from __future__ import print_function
limit = int(input("limit? "))
# fetchs the next number
for number in range(limit - 1, 10000, -1):
# converts number into string.
strNumber = str(number)
# checks whether 'strNumber' is a palindrome.
if strNumber == strNumber[::-1]:
divisor = 999
# if 'number' is a product of two 3-digit numbers
# then number is the answer otherwise fetch next number.
while divisor != 99:
if (number % divisor == 0) and (len(str(number / divisor)) == 3):
print(number)
exit(0)
divisor -= 1
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
f1504401ecfae9c68665a07df227490f7bdde2e6
|
4f3a4c194451eae32f1ff7cf3b0db947e3892365
|
/39/main.py
|
380f721980637a5dbb3d095e6966d349ecfd7c39
|
[] |
no_license
|
szhongren/leetcode
|
84dd848edbfd728b344927f4f3c376b89b6a81f4
|
8cda0518440488992d7e2c70cb8555ec7b34083f
|
refs/heads/master
| 2021-12-01T01:34:54.639508
| 2021-11-30T05:54:45
| 2021-11-30T05:54:45
| 83,624,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
"""
Given a set of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.
The same repeated number may be chosen from C unlimited number of times.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
For example, given candidate set [2, 3, 6, 7] and target 7,
A solution set is:
[
[7],
[2, 2, 3]
]
"""
class Solution(object):
    def combinationSum(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]

        Depth-first search over sorted candidates. Each candidate may be
        reused unlimited times; passing the current index down prevents the
        same combination from appearing in multiple orders, so no dedup pass
        is needed (the previously commented-out dedup code is removed).
        """
        self.res = []
        self.combinationSumRecur(sorted(candidates), target, 0, [])
        return self.res

    def combinationSumRecur(self, candidates, target, start, curr_set):
        # target holds the remaining amount; 0 means curr_set sums to the goal.
        if target == 0:
            self.res.append(curr_set)
        else:
            for i in range(start, len(candidates)):
                if candidates[i] > target:
                    # Candidates are sorted, so no later candidate fits either.
                    return
                else:
                    self.combinationSumRecur(candidates, target - candidates[i], i, curr_set + [candidates[i]])
# Quick manual check: print combination sets for a small and a large input.
ans = Solution()
print(ans.combinationSum([2, 3, 6, 7], 7))
print(ans.combinationSum([92,71,89,74,102,91,70,119,86,116,114,106,80,81,115,99,117,93,76,77,111,110,75,104,95,112,94,73], 310))
|
[
"shao.zhongren@gmail.com"
] |
shao.zhongren@gmail.com
|
482d241112ea052ce15aca3724fab31234ee9eaf
|
18ab6f3ac3458db61f506bee8885c70d6de6c06e
|
/class_12/userhandling/accounts/models.py
|
69c6252de80e13838a61f0d57c38f6c4fdd2727d
|
[] |
no_license
|
coding-blocks-archives/Django2017Spring
|
8ca7a14e2d867cb07a60d2dca1c9138cada6c06a
|
008c32bc725918e93a0020b39e226c634b6f2e0f
|
refs/heads/master
| 2021-06-14T15:19:40.830677
| 2017-04-16T11:22:04
| 2017-04-16T11:22:04
| 79,050,330
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 966
|
py
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django import forms
# Create your models here.
class MyUser(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=50, default='')
address = models.CharField(max_length=300, default='')
contact = models.CharField(max_length=12, null=True)
def __unicode__(self):
return self.user.username
class RegisterForm(forms.Form):
name = forms.CharField(max_length=50, label='Your Name')
username = forms.CharField(max_length=20, label='Username')
password = forms.CharField(widget=forms.PasswordInput(), label='Password')
address = forms.CharField(max_length=200, label='Your Address')
contact = forms.CharField(max_length=12, label='You Contact')
class LoginForm(forms.Form):
username = forms.CharField(max_length=20, label='Username')
password = forms.CharField(widget=forms.PasswordInput(), label='Password')
|
[
"skd.1810@gmail.com"
] |
skd.1810@gmail.com
|
4255b09ee52495dfc8984febfc0cf9cfe0f5ca64
|
d86a7fcc543ab6066ca772f67551943ec4cad31a
|
/perf/metrics/aggregator.py
|
4d53cd7d3a0a44abc20d2129109d660a3f90cf05
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fossabot/lake
|
184b4db5a14725da919093ef0cb392c329166b89
|
75f2bf10ef50bb4979e52a7ce539ea5de00d3647
|
refs/heads/master
| 2022-12-11T10:00:46.239848
| 2020-09-16T05:04:37
| 2020-09-16T05:04:37
| 295,928,404
| 0
| 0
|
Apache-2.0
| 2020-09-16T05:04:36
| 2020-09-16T05:04:35
| null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import time
import os
import threading
class MetricsAggregator(threading.Thread):
def __init__(self, path):
super(MetricsAggregator, self).__init__()
self._stop_event = threading.Event()
self.__store = dict()
self.__path = path
self.__last_value = None
def stop(self) -> None:
self._stop_event.set()
self.join()
time.sleep(0.5)
self.__process_change()
def __process_change(self) -> None:
if not os.path.isfile(self.__path):
return
try:
with open(self.__path, mode='r', encoding='ascii') as fd:
data = json.load(fd)
(i, e, m) = data['messageIngress'], data['messageEgress'], data['memoryAllocated']
del data
value = '{}/{}/{}'.format(i, e, m)
if value != self.__last_value:
self.__store[str(int(time.time()*1000))] = value
self.__last_value = value
except:
pass
def get_metrics(self) -> dict:
return self.__store
def run(self) -> None:
self.__process_change()
while not self._stop_event.is_set():
self.__process_change()
time.sleep(1)
self.__process_change()
|
[
"noreply@github.com"
] |
fossabot.noreply@github.com
|
4e9776b12ce251408a9c5871641abe9f9225f6b2
|
d79f3a31d173f18ec112c521acdcee8e8e73724d
|
/getid.py
|
8a6fcb90734975a7c0dfc8ede803ef708a2c3468
|
[] |
no_license
|
k156/hello
|
3de815de569b38f8260e774e57b138f4da43f480
|
f5a7f386d3f78d15d7f166a95ad25724e168f472
|
refs/heads/master
| 2020-04-04T23:15:38.252126
| 2019-05-03T05:57:00
| 2019-05-03T05:57:00
| 156,352,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
from time import sleep
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
USER = ""
PASS = ""
browser = webdriver.Chrome()
browser.implicitly_wait(3)
# 로그인 페이지에 접근하기.
url_login = "https://www.yes24.com/Templates/FTLogin.aspx?ReturnURL=http://ticket.yes24.com/Pages/Perf/Detail/Detail.aspx&&ReturnParams=IdPerf=30862"
browser.get(url_login)
print("로그인 페이지에 접근합니다.")
# 아이디와 비밀번호 입력하기.
e = browser.find_element_by_id("SMemberID")
e.clear()
e.send_keys(USER)
e = browser.find_element_by_id("SMemberPassword")
e.clear()
e.send_keys(PASS)
# 입력 양식 전송해서 로그인하기.
form = browser.find_element_by_css_selector("button#btnLogin").submit()
print("로그인 버튼을 클릭합니다.")
# 예매버튼 클릭.
reserve_bt = browser.find_element_by_class_name("rbt_reserve").click()
print("예매 버튼을 클릭합니다.")
# 팝업 창으로 전환.
browser.switch_to.window(browser.window_handles[1])
# 날짜 선택하기(26일)
date_sel = browser.find_element_by_id("2019-01-17").click()
sleep(1)
# '좌석선택' 버튼 클릭.
browser.find_element_by_css_selector("div.fr img").click()
soup = BeautifulSoup(res.text, 'html.parser')
print(soup)
|
[
"jm_91@live.co.kr"
] |
jm_91@live.co.kr
|
67a016a9d7ba978adccc3d947bf989f1fe06db71
|
98e944b793b2d907e802f979bc6309b75b678716
|
/shell/shell_person.py
|
30fde62c5f4a05fb73b9732127fd0ead9955e568
|
[] |
no_license
|
rg3915/avesmarias
|
3fa17416e64908714f164254434f3ec1a6423696
|
ce29072d17024b91e8afab309e203e68fc0e15d2
|
refs/heads/master
| 2021-01-12T11:32:45.196569
| 2016-11-06T14:54:05
| 2016-11-06T14:54:05
| 72,948,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
import string
import random
import csv
from avesmarias.core.models import Person, Phone
PHONE_TYPE = ('pri', 'com', 'res', 'cel')
person_list = []
''' Read person.csv '''
with open('fix/person.csv', 'r') as f:
r = csv.DictReader(f)
for dct in r:
person_list.append(dct)
f.close()
''' Insert Persons '''
obj = [Person(**person) for person in person_list]
Person.objects.bulk_create(obj)
def gen_phone():
digits_ = str(''.join(random.choice(string.digits) for i in range(11)))
return '{} 9{}-{}'.format(digits_[:2], digits_[3:7], digits_[7:])
''' Insert Phones '''
persons = Person.objects.all()
for person in persons:
for i in range(1, random.randint(1, 5)):
Phone.objects.create(
person=person,
phone=gen_phone(),
phone_type=random.choice(PHONE_TYPE))
# Done
|
[
"rg3915@yahoo.com.br"
] |
rg3915@yahoo.com.br
|
638bb6032545c27060aeaa7fbe01b9a33bcf0ea7
|
d6a1630bcc03f059438f949ba4f59b86ef5a4bd6
|
/features/geopy_distance_features.py
|
882428e8ac4d7f9aaa832bb3288cfd7c98e3853d
|
[
"MIT"
] |
permissive
|
SunnyMarkLiu/Kaggle_NYC_Taxi_Trip_Duration
|
063f7327e9075fc7435930513cc36f8dbd35d256
|
eca7f44bc3bf1af0690305b45858359adac617b4
|
refs/heads/master
| 2021-01-02T09:30:25.639858
| 2017-09-13T03:53:18
| 2017-09-13T03:53:18
| 99,228,943
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,772
|
py
|
#!/usr/local/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
@author: MarkLiu
@time : 17-9-12 上午11:10
"""
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
import pandas as pd
from geopy.distance import great_circle
from utils import data_utils
from conf.configure import Configure
# remove warnings
import warnings
warnings.filterwarnings('ignore')
def main():
if os.path.exists(Configure.processed_train_path.format('8')):
return
train, test = data_utils.load_dataset(op_scope='7')
print 'train: {}, test: {}'.format(train.shape, test.shape)
trip_durations = train['trip_duration']
del train['trip_duration']
conbined_data = pd.concat([train, test])
def driving_distance(raw):
startpoint = (raw['pickup_latitude'], raw['pickup_longitude'])
endpoint = (raw['dropoff_latitude'], raw['dropoff_longitude'])
distance = great_circle(startpoint, endpoint).miles
return distance
print 'calc geopy distance features...'
conbined_data['osmnx_distance'] = conbined_data[['pickup_latitude', 'pickup_longitude',
'dropoff_latitude', 'dropoff_longitude']].apply(driving_distance,
axis=1)
train = conbined_data.iloc[:train.shape[0], :]
test = conbined_data.iloc[train.shape[0]:, :]
train['trip_duration'] = trip_durations
print 'train: {}, test: {}'.format(train.shape, test.shape)
print 'save dataset...'
data_utils.save_dataset(train, test, op_scope='8')
if __name__ == '__main__':
print '========== generate geopy distance features =========='
main()
|
[
"SunnyMarkLiu101@gmail.com"
] |
SunnyMarkLiu101@gmail.com
|
90d11cd9857b6436e79804bc753b2bbaf34a422d
|
fc3f784c8d00f419b11cbde660fe68a91fb080ca
|
/algoritm/20상반기 코딩테스트/소수 경로/bj1963.py
|
6f4d4774ef60720d7fc72ff334ec8ba7ecaf763d
|
[] |
no_license
|
choo0618/TIL
|
09f09c89c8141ba75bf92657ac39978913703637
|
70437a58015aecee8f3d86e6bfd0aa8dc11b5447
|
refs/heads/master
| 2021-06-25T07:01:34.246642
| 2020-12-21T04:57:13
| 2020-12-21T04:57:13
| 163,782,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
import sys
sys.stdin=open('bj1963.txt','r')
def Find(n,s):
ns=[]
for i in ['0','1','2','3','4','5','6','7','8','9']:
if not n and i=='0':continue
ss=s[:n]+i+s[n+1:]
if not Map[int(ss)] and s!=ss and not M[int(ss)]:
M[int(ss)]=1
ns.append(ss)
return ns
Map=[0]*10001
for i in range(2,10001):
if Map[i]:continue
tmp=i
while True:
tmp+=i
if tmp>10000:break
Map[tmp]=1
T=int(input())
for t in range(T):
n1,n2=map(int,input().split())
if n1==n2:print(0);continue
Que=[str(n1)]
M=[0]*10001
M[n1]=1
R,Check=0,0
while Que and not Check:
R+=1
Q=[]
for q in Que:
if int(q)==n2:Check=1;break
for i in range(4):
Q+=Find(i,q)
Que=Q
if Check:print(R-1)
else:print('Impossible')
|
[
"choo0618@naver.com"
] |
choo0618@naver.com
|
ff94c4fe9772efb3f93861e6eced73496ca45bfe
|
f3eb45a23b421ed8b160a6cf7c8670efb7e9ff4f
|
/4_digits_of_pi/3_dask_multicore_digits_of_pi.py
|
a30c429c179e07f93e73ecee53aed9a9898800f3
|
[
"MIT"
] |
permissive
|
zonca/intro_hpc
|
4197a49a3a3b2f8cfbe1cfb9d30e9d7f5100c8ac
|
b0ee213e95d045abdfbbf82849939a2bb4ea125b
|
refs/heads/master
| 2021-01-23T01:41:41.809291
| 2017-07-22T20:41:53
| 2017-07-22T21:10:29
| 92,886,908
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
#!/usr/bin/env python3
import sys
import numpy as np
import dask.array as da
def inside_circle(total_count):
x = da.random.uniform(size=total_count, chunks=total_count//48)
y = da.random.uniform(size=total_count, chunks=total_count//48)
radii_square = x**2 + y**2
count = (radii_square<=1.0).sum().compute()
return count
def estimate_pi(n_samples):
return (4.0 * inside_circle(n_samples) / n_samples)
if __name__=='__main__':
n_samples = 10000
if len(sys.argv) > 1:
n_samples = int(sys.argv[1])
my_pi = estimate_pi(n_samples)
sizeof = np.dtype(np.float64).itemsize
print("pi is {} from {} samples".format(my_pi,n_samples))
print("error is {:.3e}".format(abs(my_pi - np.pi)))
|
[
"code@andreazonca.com"
] |
code@andreazonca.com
|
9c90fde14be791e32a374c0dd2d82fad92ea21ef
|
27eec9c18320fbc20b0fbec628447a3442facc12
|
/CNN_ConvLSTM/utils/convlstm.py
|
f03883c2e05d74cdfccb1069d5bc90d47ba8268c
|
[
"MIT"
] |
permissive
|
peternara/ResNet_ConvLSTM
|
06428a400f8e93209d4b81f1a6d2b55a58bdb79a
|
1e2c239af6854d122f138f109d4c1de82930ce43
|
refs/heads/main
| 2023-05-09T12:43:49.965613
| 2021-06-01T02:49:02
| 2021-06-01T02:49:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,026
|
py
|
import torch.nn as nn
from torch.autograd import Variable
import torch
class ConvLSTMCell(nn.Module):
def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
"""
Initialize ConvLSTM cell.
Parameters
----------
input_size: (int, int)
Height and width of input tensor as (height, width).
input_dim: int
Number of channels of input tensor.
hidden_dim: int
Number of channels of hidden state.
kernel_size: (int, int)
Size of the convolutional kernel.
bias: bool
Whether or not to add the bias.
"""
super(ConvLSTMCell, self).__init__()
self.height, self.width = input_size
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias
self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim, #输出为4*hidden_dim,后面拆成四个部分
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias)
def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv(combined)
# 输入门,遗忘门,输出门,候选记忆细胞
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
return h_next, c_next
def init_hidden(self, batch_size):
return (Variable(torch.zeros(batch_size, self.hidden_dim, self.height, self.width)).cuda(),
Variable(torch.zeros(batch_size, self.hidden_dim, self.height, self.width)).cuda())
class ConvLSTM(nn.Module):
def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,
batch_first=False, bias=True, return_all_layers=False):
super(ConvLSTM, self).__init__()
self._check_kernel_size_consistency(kernel_size)
# Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')
self.height, self.width = input_size
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bias = bias
self.return_all_layers = return_all_layers
cell_list = []
for i in range(0, self.num_layers):
cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i-1]
cell_list.append(ConvLSTMCell(input_size=(self.height, self.width),
input_dim=cur_input_dim,
hidden_dim=self.hidden_dim[i],
kernel_size=self.kernel_size[i],
bias=self.bias))
self.cell_list = nn.ModuleList(cell_list)
def forward(self, input_tensor, hidden_state=None):
"""
Parameters
----------
input_tensor: todo
5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)
hidden_state: todo
None. todo implement stateful
Returns
-------
last_state_list, layer_output
"""
if not self.batch_first:
# (t, b, c, h, w) -> (b, t, c, h, w)
input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
# Implement stateful ConvLSTM
if hidden_state is not None:
raise NotImplementedError()
else:
hidden_state = self._init_hidden(batch_size=input_tensor.size(0))
layer_output_list = []
last_state_list = []
seq_len = input_tensor.size(1)
cur_layer_input = input_tensor
for layer_idx in range(self.num_layers):
# 层数
h, c = hidden_state[layer_idx]
output_inner = []
for t in range(seq_len):
# 序列长度
h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
cur_state=[h, c])
output_inner.append(h)
layer_output = torch.stack(output_inner, dim=1)
cur_layer_input = layer_output
layer_output_list.append(layer_output)
last_state_list.append([h, c])
if not self.return_all_layers:
layer_output_list = layer_output_list[-1:]
last_state_list = last_state_list[-1:]
return layer_output_list, last_state_list
def _init_hidden(self, batch_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.cell_list[i].init_hidden(batch_size))
return init_states
@staticmethod
def _check_kernel_size_consistency(kernel_size):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')
@staticmethod
def _extend_for_multilayer(param, num_layers):
if not isinstance(param, list):
param = [param] * num_layers
return param
|
[
"noreply@github.com"
] |
peternara.noreply@github.com
|
82700d40eab51d34a591596e4a59417b39f75684
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03393/s165485969.py
|
2108db1765fd8cb7c870158fd75a81cab596eee9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
import re
import sys
import math
import itertools
import bisect
from copy import copy
from collections import deque,Counter
from decimal import Decimal
import functools
def v(): return input()
def k(): return int(input())
def S(): return input().split()
def I(): return map(int,input().split())
def X(): return list(input())
def L(): return list(input().split())
def l(): return list(map(int,input().split()))
def lcm(a,b): return a*b//math.gcd(a,b)
sys.setrecursionlimit(10 ** 9)
mod = 10**9+7
cnt = 0
ans = 0
inf = float("inf")
al = "abcdefghijklmnopqrstuvwxyz"
import string
s = v()
if s == 'zyxwvutsrqponmlkjihgfedcba':
print(-1)
exit()
lis =list(string.ascii_lowercase)
nlis = [0]*26
for i in s:
t = lis.index(i)
nlis[t] += 1
if sum(nlis) != 26:
for i in range(26):
if nlis[i] == 0:
print(s+lis[i])
break
else:
for i in range(25, -1, -1):
for j in lis:
if s[i] < j and j not in s[:i]:
print(s[:i] + j)
exit()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
23532656417e4f17a6b726c887f112f46a905d58
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/right_life.py
|
a00c57735a1259cdb92912789021f1a768eacd33
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#! /usr/bin/env python
def life_or_little_time(str_arg):
hand(str_arg)
print('work_bad_part')
def hand(str_arg):
print(str_arg)
if __name__ == '__main__':
life_or_little_time('week_and_bad_fact')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
02a864677772bde23c7d4bf75b729b9a113adbe6
|
42240f6bbabcfb7a8e2f0957ab2d3c46c2920fd1
|
/lib/python/statcode/filetype_config.py
|
58a15f709ddbf0882bf49841242a61fad5dd2d34
|
[
"Apache-2.0"
] |
permissive
|
simone-campagna/statcode
|
164a219c699551b70ee12640f42199b72cc76879
|
a9f39b666d9670b9916623fde343b9174d563724
|
refs/heads/master
| 2021-01-01T06:32:25.734613
| 2013-09-17T08:12:49
| 2013-09-17T08:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
#!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
from .config import Config
class FileTypeConfig(Config):
DEFAULT_CATEGORY = '{no-category}'
__defaults__ = {
'binary': 'False',
'category': DEFAULT_CATEGORY,
'file_extensions': '',
'file_patterns': '',
'interpreter_patterns': '',
'keywords': '',
'regular_expressions': '',
}
|
[
"simone.campagna@tiscali.it"
] |
simone.campagna@tiscali.it
|
e71765761571691b4c463f3710a44d6329846b82
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-Intents/PyObjCTest/test_inpersonhandlelabel.py
|
ad84f5b6277cb2d3c6b8928edf94837316b6d5fe
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
import sys
from PyObjCTools.TestSupport import *
if sys.maxsize > 2 ** 32:
import Intents
class TestINPersonHandleLabel (TestCase):
@min_os_level('10.12')
def testConstants(self):
self.assertIsInstance(Intents.INPersonHandleLabelHome, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelWork, unicode)
self.assertIsInstance(Intents.INPersonHandleLabeliPhone, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelMobile, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelMain, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelHomeFax, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelWorkFax , unicode)
self.assertIsInstance(Intents.INPersonHandleLabelPager, unicode)
self.assertIsInstance(Intents.INPersonHandleLabelOther, unicode)
if __name__ == "__main__":
main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
4750432b226683768a660d9a7566173f603adfbd
|
0f4cacd40260137d3d0b3d1b34be58ac76fc8bd0
|
/2016/advent24.my.py
|
df8a2c52a1d331b2427d0dbb0405963b4335febe
|
[] |
no_license
|
timrprobocom/advent-of-code
|
45bc765e6ee84e8d015543b1f2fa3003c830e60e
|
dc4d8955f71a92f7e9c92a36caeb954c208c50e7
|
refs/heads/master
| 2023-01-06T07:19:03.509467
| 2022-12-27T18:28:30
| 2022-12-27T18:28:30
| 161,268,871
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,570
|
py
|
#
# Holy shit.
#
grid = """\
###########
#0.1.....2#
#.#######.#
#4.......3#
###########""".splitlines()
xmax = len(grid[0])
ymax = len(grid)
# 0 is at row 18 col 3
# So, as long as there are no decisions, move forward. When we reach a decision point,
# push the point on a stack, pick left, continue on.
# Stop when :
# - no possible choices
# - we hit all 8 numbers
# - path is longer then the current shortest win
# - we reach a visited point with the same collection of items
#
# Sheesh, one of the numbers is in a dead end, so we can't deny retracing.
# I suppose we can stop if we reach a point x with the same collection of items.
# Should preprocess to identify possible directions out of each point?
N,E,S,W = range(4)
deltas = ((-1,0),(0,1),(1,0),(0,-1))
def buildGrid( grid ):
dgrid = []
pills = {}
for y in range(ymax):
row = []
for x in range(xmax):
c = grid[y][x]
if c == '#':
row.append([])
else:
# Check N E S W
works = []
for dy,dx in deltas:
if 0 <= x+dx <= xmax and \
0 <= y+dy <= ymax and \
grid[y+dy][x+dx] != '#':
works.append( (dy,dx) )
row.append( works )
if c != '.':
pills[(y,x)] = c
dgrid.append( row )
return dgrid, pills
dgrid, pills = buildGrid( grid )
decisions = []
stack = []
class State(object):
def __init__(self, x0, y0 ):
self.x0 = x0
self.y0 = y0
self.came = None
self.found = []
self.path = []
self.choices = ()
def familiar(self):
return (self.y0,self.x0,self.found) in self.path
def update( self, pair ):
self.path.append( (self.y0, self.x0, self.found) )
self.y0 += pair[0]
self.x0 += pair[1]
def len(self):
return len(self.path)
def push(self):
print "Pushing state"
print self.path
stack.append( self.__dict__.copy() )
def pop(self):
print "Popping state"
dct = stack.pop()
self.__dict__.update( dct )
print self.path
def oneStep( s ):
y0, x0 = s.y0, s.x0
print "At ", y0, x0
s.choices = dgrid[y0][x0][:]
if (y0,x0) in pills:
p = pills[(y0,x0)]
if p not in s.found:
print "Found ", p
s.found += p
if len(s.found) == len(pills):
print "*** found everything *** length ", s.len()
s.pop()
return
if s.came:
print "Came from ", s.came
print "Choices are ", s.choices
s.choices.remove( s.came )
if len(s.choices) == 0:
print "No more choices"
s.pop()
return
if s.familiar():
print "We've been here before."
s.pop()
return
if len(s.choices) == 1:
print "Must go ", s.choices[0]
s.came = tuple(-k for k in s.choices[0])
s.update( s.choices[0] )
return
s.push()
pick = s.choices.pop(0)
print "First choice ", pick
s.came = tuple(-k for k in pick)
s.update( pick )
state = State( 1, 1 );
state.push()
while 1:
oneStep(state)
# Remember where we came from
# At each step:
# Take list of choices
# Remove from where we came
# If there is only one remaining
# Go that way
# Otherwise
# Remember x, y, treasures,
# for each possibility
# Try it
|
[
"timr@probo.com"
] |
timr@probo.com
|
2a715ba1c3bd9636d92fbac36798cfaf9786dc35
|
5dd03f9bd8886f02315c254eb2569e4b6d368849
|
/src/python/twitter/common/python/eggparser.py
|
9b5b6bdc1282fe4543ffc44cae5daacea7063937
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
adamsxu/commons
|
9e1bff8be131f5b802d3aadc9916d5f3a760166c
|
9fd5a4ab142295692994b012a2a2ef3935d35c0b
|
refs/heads/master
| 2021-01-17T23:13:51.478337
| 2012-03-11T17:30:24
| 2012-03-11T17:30:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,547
|
py
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
class EggParserOsModule:
"""
Abstraction of the os-level functions the egg parser needs, so we can
break it in tests.
"""
@staticmethod
def uname():
import os
return os.uname()
@staticmethod
def version_info():
import sys
return sys.version_info
class EggParser(object):
"""
Parser of .egg filenames, which come in the following format:
name ["-" version ["-py" pyver ["-" required_platform]]] "." ext
"""
def __init__(self,
uname = EggParserOsModule.uname(),
version_info = EggParserOsModule.version_info()):
self._uname = uname
self._version_info = version_info
@staticmethod
def _get_egg_name(components):
return (components[0], components[1:])
@staticmethod
def _get_egg_version(components):
for k in range(len(components)):
if components[k].startswith("py"):
return ('-'.join(components[0:k]), components[k:])
if components:
return ('-'.join(components), [])
else:
return (None, [])
@staticmethod
def _get_egg_py_version(components):
if components and components[0].startswith("py"):
try:
major, minor = components[0][2:].split('.')
major, minor = int(major), int(minor)
return ((major, minor), components[1:])
except:
pass
return ((), components)
@staticmethod
def _get_egg_platform(components):
return (tuple(components), [])
def parse(self, filename):
if not filename: return None
if not filename.endswith('.egg'): return None
components = filename[0:-len('.egg')].split('-')
package_name, components = EggParser._get_egg_name(components)
package_version, components = EggParser._get_egg_version(components)
package_py_version, components = EggParser._get_egg_py_version(components)
package_platform, components = EggParser._get_egg_platform(components)
return (package_name, package_version, package_py_version, package_platform)
def get_architecture(self):
py_version = self._version_info[0:2]
platform = self._uname[0].lower()
arch = self._uname[-1].lower()
if platform == 'darwin': platform = 'macosx'
return (platform, arch, py_version)
def is_compatible(self, filename):
try:
_, _, egg_py_version, egg_platform = self.parse(filename)
except:
return False
my_platform, my_arch, my_py_version = self.get_architecture()
if egg_py_version and egg_py_version != my_py_version: return False
if egg_platform and egg_platform[0] != my_platform: return False
# ignore specific architectures until we ever actually care.
return True
|
[
"jsirois@twitter.com"
] |
jsirois@twitter.com
|
10ba8b7670ca96c7d6a83e9a4cbb5484f4e95a53
|
446bd1170475e640e4a50476cd80514b0693ee61
|
/demo/demo1/demo1/picture/jishe/demo2/Demo3/Demo3/spiders/main.py
|
04dfd6ea4f4f2efd572b417bf2be0aa4f5725558
|
[] |
no_license
|
HarperHao/python
|
f040e1e76a243a3dba2b342029a74a45232c1c8d
|
4bd807605c0acca57b8eea6444b63d36d758cca9
|
refs/heads/master
| 2021-07-20T04:40:37.515221
| 2020-10-02T08:58:37
| 2020-10-02T08:58:37
| 219,732,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
# from scrapy.cmdline import execute
#
# execute("scrapy crawl zuowumianji".split())
import numpy as np
# LU分解
def LU_Decompose(matrix):
rows, columns = np.shape(matrix)
if rows != columns:
print("所输入的矩阵必须是方阵!")
return
L = np.eye(rows)
U = np.triu(matrix) # 先求出U矩阵(化上三角矩阵)
# 求L矩阵(主对角线为1的下三角矩阵)
L[:, 0] = matrix[:, 0] / U[0][0] # L的第一列
for k in range(1, columns - 1): # 从第2列到columns-1列
for i in range(k + 1, rows): # 从第3行到第rows行
sum = 0
for j in range(0, k - 1): # (0,0)不行
x = L[i][j] * U[j][k]
sum = sum + x
L[i][k] = (matrix[i][k] - sum) / U[k][k]
return L, U
# 解LY=b
def solve_equation1(L, b):
columns = np.shape(b)[0]
y = []
y.append(b[0][0]) # y0=b0
for i in range(1, columns): # 求yi
sum = 0
for j in range(i):
sum = sum + L[i][j] * y[j]
y_ = b[i][0] - sum
y.append(y_)
return y
# 解UX=Y
def solve_equation2(U, Y):
columns = np.shape(Y)[0]
X = [i for i in range(columns)] # 先给X初始化
if U[columns - 1] == 0:
X[columns - 1] = Y[columns - 1] / U[columns - 1][columns - 1] # Xcolumns-1=Ycolumns-1/U[columns-1][columns-1]
else:
X[columns - 1] = 0
matrix = np.array([[2, -1, 1],
[4, 1, -1],
[1, 1, 1]])
rows, columns = np.shape(matrix)
L, U = LU_Decompose(matrix)
# b = np.eye(rows)
b = np.array([1, 5, 0]).reshape(3, 1)
# y = solve_equation1(L, b)
print(L, U)
|
[
"m19834406344@163.com"
] |
m19834406344@163.com
|
be0a4b0ae23f12893b303e8bc4cb504c7f517d0f
|
d0f11aa36b8c594a09aa06ff15080d508e2f294c
|
/leecode/1-500/1-100/39-组合总和.py
|
751f2a332ffc2b86704c60a28303c9b7f6961e04
|
[] |
no_license
|
saycmily/vtk-and-python
|
153c1fe9953fce685903f938e174d3719eada0f5
|
5045d7c44a5af5c16df5a3b72c157e9a2928a563
|
refs/heads/master
| 2023-01-28T14:02:59.970115
| 2021-04-28T09:03:32
| 2021-04-28T09:03:32
| 161,468,316
| 1
| 1
| null | 2023-01-12T05:59:39
| 2018-12-12T10:00:08
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
class Solution:
def combinationSum(self, candidates, target: int):
candidates.sort()
n = len(candidates)
res = []
def backtrack(tmp, tmp_sum=0, first=0):
if tmp_sum == target:
res.append(tmp.copy())
return
for j in range(first, n):
if tmp_sum + candidates[j] > target:
break
tmp.append(candidates[j])
backtrack(tmp, tmp_sum + candidates[j], j)
tmp.pop()
backtrack([])
return res
|
[
"1786386686@qq.com"
] |
1786386686@qq.com
|
eb64e5d68a519e53d4e37ab1f2670f115f660766
|
f02b21d5072cb66af643a7070cf0df4401229d6e
|
/leetcode/depth_first_search/695-max_area_of_island.py
|
939a2da8c3ed73d926cf1e1b3a4173e9f7dc2bbb
|
[] |
no_license
|
dbconfession78/interview_prep
|
af75699f191d47be1239d7f842456c68c92b95db
|
7f9572fc6e72bcd3ef1a22b08db099e1d21a1943
|
refs/heads/master
| 2018-10-09T22:03:55.283172
| 2018-06-23T01:18:00
| 2018-06-23T01:18:00
| 110,733,251
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,673
|
py
|
import sys
class Solution:
# def maxAreaOfIsland_PRACTICE(self, grid):
def maxAreaOfIsland(self, grid):
retval = 0
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == 1:
retval = max(retval, self.helper(grid, i, j, 0))
return retval
def helper(self, grid, i, j, area):
if i < 0 or j < 0 or i > len(grid) - 1 or j > len(grid[i]) - 1 or grid[i][j] == 0:
return area
grid[i][j] = 0
area += 1
area = self.helper(grid, i, j + 1, area)
area = self.helper(grid, i + 1, j, area)
area = self.helper(grid, i, j - 1, area)
area = self.helper(grid, i - 1, j, area)
return area
def maxAreaOfIsland_PASSED(self, grid):
# def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
big = 0
i = j = 0
while i < len(grid):
j = 0
while j < len(grid[i]):
if grid[i][j] == 1:
big = max(big, self.explore(grid, i, j))
j += 1
i += 1
return big
def explore(self, grid, i, j):
if i < 0 or i > len(grid) - 1 or j < 0 or j > len(grid[i]) - 1 or grid[i][j] == 0:
return 0
grid[i][j] = 0
count = 1
count += self.explore(grid, i, j + 1)
count += self.explore(grid, i, j - 1)
count += self.explore(grid, i - 1, j)
count += self.explore(grid, i + 1, j)
return count
def print_map(grid):
for row in grid:
for cell in row:
sys.stdout.write('{} '.format(cell))
print()
def main():
    """Run the sample grids from the problem statement through the solver.

    Expected outputs, in order: 4, 3, 1, 6.
    """
    sample_grids = [
        [[1, 1, 0, 0, 0],
         [1, 1, 0, 0, 0],
         [0, 0, 0, 1, 1],
         [0, 0, 0, 1, 1]],
        [[1, 1, 0, 1, 1],
         [1, 0, 0, 0, 0],
         [0, 0, 0, 0, 1],
         [1, 1, 0, 1, 1]],
        [[1]],
        [[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
         [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
         [0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]],
    ]
    for sample in sample_grids:
        print(Solution().maxAreaOfIsland(sample))
# LC Input
# [[1,1,0,0,0],[1,1,0,0,0],[0,0,0,1,1],[0,0,0,1,1]]
# [[1,1,0,1,1],[1,0,0,0,0],[0,0,0,0,1],[1,1,0,1,1]]
# [[1]]
# [[0,0,1,0,0,0,0,1,0,0,0,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,1,1,0,1,0,0,0,0,0,0,0,0],[0,1,0,0,1,1,0,0,1,0,1,0,0],[0,1,0,0,1,1,0,0,1,1,1,0,0],[0,0,0,0,0,0,0,0,0,0,1,0,0],[0,0,0,0,0,0,0,1,1,1,0,0,0],[0,0,0,0,0,0,0,1,1,0,0,0,0]]
if __name__ == '__main__':
main()
# Instructions
"""
Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's (representing land) connected 4-directionally (horizontal or vertical.) You may assume all four edges of the grid are surrounded by water.
Find the maximum area of an island in the given 2D array. (If there is no island, the maximum area is 0.)
Example 1:
[[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Given the above grid, return 6. Note the answer is not 11, because the island must be connected 4-directionally.
Example 2:
[[0,0,0,0,0,0,0,0]]
Given the above grid, return 0.
Note: The length of each dimension in the given grid does not exceed 50.
"""
|
[
"Hyrenkosa1"
] |
Hyrenkosa1
|
ae09834689a7ed3701d3ef9439f82ccc31caa63b
|
3e4d78628a66927e2a640ca4f328adcc31e156b9
|
/deejay/queuer.py
|
be6d7a434cdd0da2d3e15b533b77f38a4bf36a50
|
[] |
no_license
|
nijotz/shitstream
|
360d41a1411dc480dd220790f9513d202a18ee78
|
7d11171fb35aaf6d778d5bf23046d220939711be
|
refs/heads/master
| 2021-01-01T16:19:22.224760
| 2014-10-16T22:48:17
| 2014-10-16T22:48:17
| 23,303,299
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,892
|
py
|
import os
import re
import pyechonest.config
import pyechonest.song
import pyechonest.playlist
from downloaders.youtube import search as youtube_search
from mpd_util import mpd
from server import app
import settings
pyechonest.config.CODEGEN_BINARY_OVERRIDE = settings.dj_codegen_binary
pyechonest.config.ECHO_NEST_API_KEY = settings.dj_echonest_api_key
logger = app.logger
@mpd
def queuer(mpdc):
    """Main loop: keep the MPD queue topped up with recommended songs.

    Blocks on MPD idle events between checks; any failure is logged and
    the loop starts over rather than killing the process.
    """
    while True:
        try:
            if should_queue(mpdc=mpdc):
                logger.info('Should queue, dewin it')
                queue_shit(mpdc=mpdc)
            else:
                logger.info('Should not queue')
            logger.info('Queuer waiting')
            # Block until the playlist or player state changes before re-checking.
            mpdc.idle(['playlist', 'player'])
        except Exception as e:
            logger.exception(e)
            logger.error('Queuer failure, starting over')
@mpd
def should_queue(mpdc):
    """Return True when under ten minutes of playback remain in the queue."""
    now_playing = mpdc.currentsong()
    if not now_playing:
        return False
    playing_pos = int(now_playing.get('pos'))
    # Total duration of the current song plus everything queued after it,
    # minus how far into the current song playback already is.
    remaining = sum(float(entry.get('time'))
                    for entry in mpdc.playlistinfo()
                    if int(entry.get('pos')) >= playing_pos)
    remaining -= float(mpdc.status().get('elapsed', 0))
    return remaining < 60 * 10
@mpd
def prev_songs(mpdc, num=5):
    """Return up to *num* most recent queue entries, newest first.

    The current song is included; entries under the bumps directory are
    excluded.  Returns [] when nothing is playing.
    """
    current_song = mpdc.currentsong()
    if not current_song:
        return []
    current_pos = int(current_song.get('pos'))
    queue = mpdc.playlistinfo()
    queue = filter(lambda x: not x.get('file', '').startswith(settings.dj_bumps_dir), queue) #FIXME: bumps filter needs dry
    # Index the surviving entries by playlist position; positions can be
    # sparse after the bumps filter, so walk backwards and skip the gaps.
    queue_dict = dict([ (int(song.get('pos')), song) for song in queue ])
    sample = []
    i = current_pos
    while len(sample) < num and i >= 0:
        song = queue_dict.get(i)
        if song:
            sample.append(song)
        i -= 1
    return sample
@mpd
def queue_shit(mpdc):
    """Queue recommendations derived from the recently played songs.

    For each recommendation, prefer an exact artist+title match from the
    local MPD library, fall back to any track by the same artist, and
    finally fall back to downloading the top YouTube search result.
    """
    prev = prev_songs(mpdc=mpdc)
    recs = get_recommendations(prev)
    for song in recs:
        # Exact local match first.
        mpd_songs = mpdc.search('artist', song.artist_name, 'title', song.title)
        if mpd_songs:
            mpdc.add(mpd_songs[0].get('file'))
            continue
        # Any local track by the same artist.
        mpd_songs = mpdc.search('artist', song.artist_name)
        if mpd_songs:
            mpdc.add(mpd_songs[0].get('file'))
            continue
        # Last resort: fetch it from YouTube.
        url = youtube_search(u'{} {}'.format(song.artist_name, song.title))
        if url:
            from server import add_url #FIXME
            def log(x):
                logger.info(x)
            add_url(url, log)
def find_youtube_vide(song):
    """Unimplemented stub; the name looks like a typo of find_youtube_video."""
    pass
def get_recommendations(prev):
    """Return Echo Nest song-radio recommendations seeded by *prev* entries."""
    identified = (identify_song(entry) for entry in prev)
    song_ids = [match.id for match in identified if match]
    if not song_ids:
        logger.info('No previous songs identified')
        return []
    logger.info('Identified {} previous songs'.format(len(song_ids)))
    result = pyechonest.playlist.static(type='song-radio', song_id=song_ids, results=10)
    # Does echonest return the five songs I gave it to seed? Looks like..
    return result[5:]
@mpd
def identify_song(song, mpdc):
    """Look the song up on Echo Nest; return the best match or None."""
    artist = song.get('artist')
    title = song.get('title')
    if not (artist or title):
        return #TODO: try harder
    results = pyechonest.song.search(artist=artist, title=title)
    if results:
        return results[0]
    logger.warn(u'No results for: {} - {}'.format(artist,title))
    # try stripping weird characters from the names
    strip_odd = lambda text: re.sub(r'([^\s\w]|_)+', '', text)
    artist, title = strip_odd(artist), strip_odd(title)
    results = pyechonest.song.search(artist=artist, title=title)
    if results:
        return results[0]
    logger.warn(u'No results for: {} - {}'.format(artist,title))
personality = queuer
|
[
"nick@nijotz.com"
] |
nick@nijotz.com
|
1223460f79aa83654eb9c6e0e3b50f90b2366482
|
a364f53dda3a96c59b2b54799907f7d5cde57214
|
/easy/35-Search Insertion Position.py
|
6c7c2e089fbf94d023498b845a9645138b07243e
|
[
"Apache-2.0"
] |
permissive
|
Davidxswang/leetcode
|
641cc5c10d2a97d5eb0396be0cfc818f371aff52
|
d554b7f5228f14c646f726ddb91014a612673e06
|
refs/heads/master
| 2022-12-24T11:31:48.930229
| 2020-10-08T06:02:57
| 2020-10-08T06:02:57
| 260,053,912
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
"""
https://leetcode.com/problems/search-insert-position/
Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
You may assume no duplicates in the array.
Example 1:
Input: [1,3,5,6], 5
Output: 2
Example 2:
Input: [1,3,5,6], 2
Output: 1
Example 3:
Input: [1,3,5,6], 7
Output: 4
Example 4:
Input: [1,3,5,6], 0
Output: 0
"""
# it is pretty simple, since the array is monotonically increasing, we should check == first
# if not, check <, move toward the end if yes
# if found a nums[i] > target, it indicates that the target is >num[i-1] and target is < nums[i], return i
# if in the end, nothing found, add this target at the end of the original list
# time complexity: O(n), space complexity: O(1)
class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        """Return the index of target in sorted nums, or where it would go.

        nums is sorted ascending with no duplicates.  Uses binary search
        (bisect_left) for O(log n) time instead of the original linear
        scan; the result is identical — the first index i such that
        nums[i] >= target, or len(nums) when every element is smaller.
        Handles an empty list (returns 0).
        """
        from bisect import bisect_left  # local import keeps the snippet self-contained
        return bisect_left(nums, target)
|
[
"wxs199327@hotmail.com"
] |
wxs199327@hotmail.com
|
3cc745f34716dfeb254720f8c0c01a80b7c5d438
|
67ca269e39935d0c439329c3a63df859e40168bb
|
/autoPyTorch/pipeline/components/setup/lr_scheduler/constants.py
|
2e5895632deb9caa92b26070d7495d27d57ba970
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
automl/Auto-PyTorch
|
2e67ffb44f40d9993470ded9b63f10a5164b41df
|
56a2ac1d69c7c61a847c678879a67f5d3672b3e8
|
refs/heads/master
| 2023-07-14T22:55:57.826602
| 2022-08-23T16:43:15
| 2022-08-23T16:43:15
| 159,791,040
| 2,214
| 280
|
Apache-2.0
| 2023-04-04T14:41:15
| 2018-11-30T08:18:34
|
Python
|
UTF-8
|
Python
| false
| false
| 450
|
py
|
from enum import Enum
class StepIntervalUnit(Enum):
    """Granularity at which a learning-rate scheduler performs its step.

    Members (each value equals its name):
        batch: step after every batch evaluation
        epoch: step after every epoch
        valid: step after every validation pass
    """
    batch = 'batch'
    epoch = 'epoch'
    valid = 'valid'


# Member names as plain strings, for use as hyperparameter choices.
StepIntervalUnitChoices = [member.name for member in StepIntervalUnit]
|
[
"noreply@github.com"
] |
automl.noreply@github.com
|
744ef4550ea1381e96181d3d0cf7df33ca8a133d
|
762de1c66746267e05d53184d7854934616416ee
|
/tools/MolSurfGenService/MolSurfaceGen32/chimera/share/VolumeData/tom_em/em_format.py
|
6c027e9831ce8864e76310e1e1193380a04ead29
|
[] |
no_license
|
project-renard-survey/semanticscience
|
6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677
|
024890dba56c3e82ea2cf8c773965117f8cda339
|
refs/heads/master
| 2021-07-07T21:47:17.767414
| 2017-10-04T12:13:50
| 2017-10-04T12:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,434
|
py
|
# -----------------------------------------------------------------------------
# Read TOM Toolbox EM density map file (http://www.biochem.mpg.de/tom/)
# electron microscope data.
#
# Byte swapping will be done if needed.
#
# -----------------------------------------------------------------------------
#
class EM_Data:
    """Reader for TOM Toolbox EM density map files.

    Parses the 512-byte header to determine byte order, grid size, voxel
    size and element type, then exposes read_matrix() for sub-volume reads.
    Byte swapping is applied when the file's endianness differs from the
    host's.  NOTE(review): this is Python 2 era code (`raise E, msg`,
    dict.has_key) and will not run unmodified on Python 3.
    """

    def __init__(self, path):
        """Open *path*, validate the header and record layout metadata."""
        self.path = path
        import os.path
        self.name = os.path.basename(path)
        file = open(path, 'rb')
        file.seek(0,2) # go to end of file
        file_size = file.tell()
        file.seek(0,0) # go to beginning of file
        # Determine byte order from machine code
        # OS-9 0
        # VAX 1
        # Convex 2
        # SGI 3
        # Sun 4 (not supported)
        # Mac 5
        # PC 6
        self.swap_bytes = False
        from numpy import int8, little_endian
        machine_code = self.read_values(file, int8, 1)
        # Codes 1 (VAX) and 6 (PC) mark little-endian files.
        file_little_endian = machine_code in (1, 6)
        self.swap_bytes = ((file_little_endian and not little_endian) or
                           (not file_little_endian and little_endian))
        file.seek(0,0)
        v = self.read_header_values(file)
        self.check_header_values(v, file_size)
        self.data_offset = file.tell()
        file.close()
        self.data_size = (v['xsize'], v['ysize'], v['zsize'])
        dstep = v['pixelsize']
        if dstep == 0:
            dstep = 1.0  # guard against an unset pixel size in the header
        self.data_step = (dstep, dstep, dstep)
        self.data_origin = (0., 0., 0.)

    # ---------------------------------------------------------------------------
    # Format derived from C header file mrc.h.
    #
    def read_header_values(self, file):
        """Parse the fixed-size EM header; return a dict of named fields."""
        from numpy import int8, int32
        i8 = int8
        i32 = int32
        v = {}
        v['machine code']= self.read_values(file, i8, 1)
        v['os 9 version']= self.read_values(file, i8, 1)
        v['abandoned header']= self.read_values(file, i8, 1)
        v['data type code']= self.read_values(file, i8, 1)
        v['xsize'], v['ysize'], v['zsize'] = self.read_values(file, i32, 3)
        v['comment'] = file.read(80)
        v['user param'] = self.read_values(file, i32, 40)
        # User parameter 7 holds the pixel size in picometers -> nm.
        v['pixelsize'] = v['user param'][6] / 1000.0 # nm
        v['user data'] = file.read(256)
        return v

    # ---------------------------------------------------------------------------
    #
    def check_header_values(self, v, file_size):
        """Validate header fields; raise SyntaxError on malformed files."""
        mc = v['machine code']
        if mc < 0 or mc > 6:
            raise SyntaxError, ('Bad EM machine code %d at byte 0, must be 0 - 6.'
                                % mc)
        dc = v['data type code']
        if not dc in (1,2,4,5,8,9):
            raise SyntaxError, ('Bad EM data type code %d' % dc +
                                ', must be 1, 2, 4, 5, 8, or 9')
        from numpy import uint8, int16, int32, float32, float64
        types = { 1: uint8,
                  2: int16,
                  4: int32,
                  5: float32,
                  9: float64 }
        # Code 8 (complex) passes the membership test above but has no
        # numpy mapping, so it falls through to the error branch below.
        if types.has_key(dc):
            self.element_type = types[dc]
        else:
            raise SyntaxError, 'Complex EM data value type not supported'
        if float(v['xsize']) * float(v['ysize']) * float(v['zsize']) > file_size:
            raise SyntaxError, ('File size %d too small for grid size (%d,%d,%d)'
                                % (file_size, v['xsize'],v['ysize'],v['zsize']))

    # ---------------------------------------------------------------------------
    #
    def read_values(self, file, etype, count):
        """Read *count* values of numpy dtype *etype* from *file*."""
        from numpy import array
        esize = array((), etype).itemsize
        string = file.read(esize * count)
        values = self.read_values_from_string(string, etype, count)
        return values

    # ---------------------------------------------------------------------------
    #
    def read_values_from_string(self, string, etype, count):
        """Decode raw bytes into values, byte-swapping if the header demands it.

        Returns a scalar when count == 1, otherwise an array.
        """
        from numpy import fromstring
        values = fromstring(string, etype)
        if self.swap_bytes:
            values = values.byteswap()
        if count == 1:
            return values[0]
        return values

    # ---------------------------------------------------------------------------
    # Returns 3D NumPy matrix with zyx index order.
    #
    def read_matrix(self, ijk_origin, ijk_size, ijk_step, progress):
        """Read a strided sub-volume of the map; delegates to read_array."""
        from VolumeData.readarray import read_array
        matrix = read_array(self.path, self.data_offset,
                            ijk_origin, ijk_size, ijk_step,
                            self.data_size, self.element_type, self.swap_bytes,
                            progress)
        return matrix
|
[
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] |
alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5
|
778ae380f0cad62eb41f8e8dbe2862993143ee93
|
495907c7e2e2d591df2d6906335c3d89c5a4a47b
|
/helpers/logHelpers.py
|
42129a0969e389b0136e85078c31651d76b26bbb
|
[] |
no_license
|
ePandda/idigpaleo-ingest
|
319194125aded01f018cfb7c1fe7044fe8c66770
|
8473ab31e7a56878236136d0ace285ab3738f208
|
refs/heads/master
| 2020-11-26T19:48:13.959972
| 2018-04-21T17:19:38
| 2018-04-21T17:19:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
#
# Class for logining status/errors from the ingest
#
# Uses the main pythn logging module
import logging
import time
def createLog(module, level):
    """Create and return a logger writing to ./logs/YYYY_MM_DD_ingest.log and stderr.

    module -- logger name (usually the caller's __name__)
    level  -- 'debug' | 'info' | 'warning' | 'error' | 'critical'
              (case-insensitive); anything else, including None,
              falls back to 'warning'

    Safe to call repeatedly for the same module: handlers are attached
    only once, so messages are not duplicated (the original attached a
    fresh pair of handlers on every call).
    """
    import os  # local import: the original module only imported logging/time
    logger = logging.getLogger(module)
    levels = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL}
    checkLevel = level.lower() if level else 'warning'
    logLevel = levels.get(checkLevel, logging.WARNING)

    if not logger.handlers:
        logDir = './logs'
        if not os.path.isdir(logDir):
            # The original crashed with FileNotFoundError when ./logs was missing.
            os.makedirs(logDir)
        today = time.strftime("%Y_%m_%d")
        loggerFile = logDir + '/' + today + "_ingest.log"
        fileLog = logging.FileHandler(loggerFile)
        conLog = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s_%(name)s_%(levelname)s: %(message)s')
        fileLog.setFormatter(formatter)
        conLog.setFormatter(formatter)
        logger.addHandler(fileLog)
        logger.addHandler(conLog)

    # Apply the level to the logger and its handlers (the original left the
    # logger itself unset when an unknown level string was passed).
    logger.setLevel(logLevel)
    for handler in logger.handlers:
        handler.setLevel(logLevel)
    return logger
|
[
"mwbenowitz@gmail.com"
] |
mwbenowitz@gmail.com
|
5b7de37d3e3ae6122a53cb151f264294e1e07cfd
|
4ec57b6ca1125feb546487ebf736fb1f7f3531ce
|
/src/bin/huawei_server/collect.py
|
7e38908b1087dbed5c09a721f9b18ad0fac206cd
|
[
"MIT"
] |
permissive
|
Huawei/Server_Management_Plugin_Nagios
|
df595b350ef1cf63347725e443b518c521afde5a
|
fbfbb852a90b6e1283288111eadbd49af2e79343
|
refs/heads/master
| 2022-09-14T15:59:48.438453
| 2022-08-22T03:19:20
| 2022-08-22T03:19:20
| 120,845,298
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,370
|
py
|
#encoding:utf-8
import sys
import os
import traceback
from config import VERSTION_STR
from base.logger import Logger
from model.plugin import LoggerConfig
from service.collectservice import CollectService
from util.common import Common
from util.check import Check
from constant.constant import *
from threading import Timer
import time
def loggerConfig(node, loggerData):
    """Read the <param> children of a log config node into *loggerData*.

    Recognised params: level, size, index.  The log path is always forced
    to <exe dir>/log.  Returns False on malformed config, True otherwise.
    """
    elements = Common.getChild(node)
    if elements is None:
        return False
    for element in elements:
        if element.tag != "param":
            return False
        name = element.attrib.get("name")
        if name == "level":
            loggerData.setLoggerLevel(element.text)
        elif name == "size":
            loggerData.setLoggerSize(element.text)
        elif name == "index":
            loggerData.setLoggerIndex(element.text)
    loggerData.setLoggerPath(Common.getExePath() + os.path.sep + "log")
    return True
def pluginConfig(loggerData):
    """Parse the plugin config XML and populate *loggerData* from its log section.

    Returns False when the file is missing or malformed, True otherwise.
    """
    configPath = Common.getPluginConfigPath()
    if not os.path.exists(configPath):
        return False
    root = Common.getRoot(configPath)
    if root is None:
        return False
    for node in Common.getChild(root):
        if node.tag != "config":
            return False
        if node.attrib.get('name') == "log":
            loggerConfig(node, loggerData)
    return True
def initPlugin():
    """Parse the plugin config and initialise the global logger.

    Returns True on success, False when the config could not be parsed.
    """
    # parse plugin config
    loggerData = LoggerConfig()
    if not pluginConfig(loggerData):
        return False
    # The initialised logger stays reachable via Logger.getInstance();
    # the original bound the result to an unused local.
    Logger.getInstance().init(loggerData)
    return True
def main(argv=None):
    """Entry point: dispatch on the mode flag and run the collect service.

    argv[1] selects the mode: -p (plugin), -a (total), -f (file); the
    remaining arguments are validated per mode.  Returns the service's
    exit status, or -1 on any validation or runtime error.
    """
    if not initPlugin():
        return -1;
    Logger.getInstance().error('========= %s ======='%VERSTION_STR)
    if(len(argv) < 2):
        Logger.getInstance().error("main error: param length should not be zero.");
        return -1;
    try:
        if "-p" == argv[1]:
            if not Check.checkPluginModeParam(argv[2:]):
                # NOTE(review): error messages log sys.argv rather than the
                # argv parameter — confirm this is intentional.
                Logger.getInstance().error("main error: param is invalid, param=%s." % sys.argv[1:]);
                return -1;
            service = CollectService(COLLECT_MODE_CMD_PLUGIN, None);
        elif "-a" == argv[1]:
            if not Check.checkTotalModeParam(argv[2:]):
                Logger.getInstance().error("main error: param is invalid, param=%s." % sys.argv[1] );
                return -1;
            service = CollectService(COLLECT_MODE_CMD_TOTAL, argv[2:]);
        elif "-f" == argv[1]:
            if not Check.checkFileModeParam(argv[2:]):
                Logger.getInstance().error("main error: param is invalid, param=%s." % sys.argv[1] );
                return -1;
            service = CollectService(COLLECT_MODE_CMD_FILE, argv[2:]);
        else:
            Logger.getInstance().error("main error: option param is invalid optoion : [%s]" % (argv[1]));
            return -1
        return service.start();
    except Exception, e:
        Logger.getInstance().exception("main exception: collect device info exception: [%s]" % e);
        return -1
if __name__ == "__main__":
    # Re-run the collection every five minutes.  The original passed
    # sys.exit(main(sys.argv)) as the Timer callback argument, which
    # *evaluated* it immediately and exited the process on the first
    # iteration — the Timer never did anything.
    timeInteval = 300
    while True:
        main(sys.argv)
        time.sleep(timeInteval)
|
[
"31431891+serverplugin@users.noreply.github.com"
] |
31431891+serverplugin@users.noreply.github.com
|
9d7664af768702a3da5d5567e409f84faf975d8a
|
4380a4029bac26f205ed925026914dce9e96fff0
|
/slyr/parser/exceptions.py
|
d791f4c9f6a447c4501d075e561829ba69832613
|
[] |
no_license
|
deepVector/slyr
|
6b327f835994c8f20f0614eb6c772b90aa2d8536
|
5d532ac3eec0e00c5883bf873d30c6b18a4edf30
|
refs/heads/master
| 2020-12-03T10:24:39.660904
| 2019-04-08T00:48:03
| 2019-04-08T00:48:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
#!/usr/bin/env python
"""
SLYR Exceptions
"""
class UnsupportedVersionException(Exception):
    """Thrown when an object of an unsupported version is encountered."""


class UnreadableSymbolException(Exception):
    """Thrown when a symbol could not be read, for whatever reason."""


class NotImplementedException(Exception):
    """Thrown when attempting to read/convert an object which is known
    but not yet implemented."""


class UnknownGuidException(Exception):
    """Thrown on encountering an unknown GUID."""


class InvalidColorException(Exception):
    """Thrown when an error was encountered while converting a color."""


class UnknownPictureTypeException(Exception):
    """Thrown on encountering an unknown picture type."""


class UnreadablePictureException(Exception):
    """Thrown on encountering an unreadable picture."""
|
[
"nyall.dawson@gmail.com"
] |
nyall.dawson@gmail.com
|
bbf9c3db561c3a9339d630028112a6794a730e5e
|
db734d1c2fa1ff072c3bad3efbc80f5fb045647b
|
/examples/advanced_symmetry.py
|
5b838beba33fb51eb88a5bfddea66a81e01fb2ff
|
[
"MIT"
] |
permissive
|
yenchunlin024/PyQchem
|
ff4a0f9062124c3ef47dba5e7c48b372e4a99c21
|
2edf984ba17373ad3fd450b18592c8b7827b72e5
|
refs/heads/master
| 2023-08-12T09:43:46.942362
| 2021-09-30T10:59:35
| 2021-09-30T10:59:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,469
|
py
|
import numpy as np
from pyqchem.symmetry import get_wf_symmetry
from pyqchem.utils import _set_zero_to_coefficients, get_plane, crop_electronic_structure
from pyqchem.qchem_core import get_output_from_qchem, create_qchem_input
from pyqchem.structure import Structure
from pyqchem.file_io import build_fchk
# Define custom classification function
def get_custom_orbital_classification(parsed_fchk,
                                      center=None,
                                      orientation=(0, 0, 1)
                                      ):
    """Classify alpha orbitals by their behaviour under inversion ('i').

    Returns one [label, |overlap|] pair per orbital, where the label is
    ' NOO' for a negative normalized self-overlap and ' YES' otherwise.
    """
    molsym = get_wf_symmetry(parsed_fchk['structure'],
                             parsed_fchk['basis'],
                             parsed_fchk['coefficients'],
                             center=center,
                             orientation=orientation)

    inv_index = molsym.SymLab.index('i')  # operation used to separate orbitals
    identity_index = molsym.SymLab.index('E')

    orbital_type = []
    for soev, norm in zip(molsym.mo_SOEVs_a[:, inv_index],
                          molsym.mo_SOEVs_a[:, identity_index]):
        normalized = soev / norm  # normalize by the identity self-overlap
        label = ' NOO' if normalized < 0 else ' YES'
        orbital_type.append([label, np.abs(normalized)])
    return orbital_type
# Example: inversion-symmetry analysis of the two fragments of an ethene
# dimer (molecules separated along x by 3.7 — presumably Angstrom; TODO
# confirm units against pyqchem's Structure convention).
dimer_ethene = [[0.0, 0.0000, 0.65750],
                [0.0, 0.0000, -0.65750],
                [0.0, 0.92281, 1.22792],
                [0.0, -0.92281, 1.22792],
                [0.0, -0.92281, -1.22792],
                [0.0, 0.92281, -1.22792],
                [3.7, 0.00000, 0.65750],
                [3.7, 0.00000, -0.65750],
                [3.7, 0.92281, 1.22792],
                [3.7, -0.92281, 1.22792],
                [3.7, -0.92281, -1.22792],
                [3.7, 0.92281, -1.22792]]

symbols = ['C', 'C', 'H', 'H', 'H', 'H', 'C', 'C', 'H', 'H', 'H', 'H']

# Atom index ranges of the two ethene fragments.
range_f1 = range(0, 6)
range_f2 = range(6, 12)

# create molecule
molecule = Structure(coordinates=dimer_ethene,
                     symbols=symbols,
                     charge=0,
                     multiplicity=1)

# create Q-Chem input
qc_input = create_qchem_input(molecule,
                              jobtype='sp',
                              exchange='hf',
                              basis='6-31G')

print(qc_input.get_txt())

# get data from Q-Chem calculation
output, electronic_structure = get_output_from_qchem(qc_input,
                                                     processors=4,
                                                     force_recalculation=False,
                                                     read_fchk=True,
                                                     fchk_only=True)

# store original fchk info in file
open('test.fchk', 'w').write(build_fchk(electronic_structure))

# get symmetry classification for fragment 1: zero out all coefficients
# outside the fragment, then classify against the fragment's own plane.
electronic_structure_f1 = crop_electronic_structure(electronic_structure, range_f1)

# save test fchk file with new coefficients
open('test_f1.fchk', 'w').write(build_fchk(electronic_structure_f1))

# get plane from coordinates
coordinates_f1 = electronic_structure['structure'].get_coordinates(fragment=range_f1)
center_f1, normal_f1 = get_plane(coordinates_f1)

# get classified orbitals
orbital_type_f1 = get_custom_orbital_classification(electronic_structure_f1,
                                                    center=center_f1,
                                                    orientation=normal_f1)

# get plane from coordinates
coordinates_f2 = electronic_structure['structure'].get_coordinates(fragment=range_f2)
center_f2, normal_f2 = get_plane(coordinates_f2)

# same treatment for fragment 2
electronic_structure_f2 = crop_electronic_structure(electronic_structure, range_f2)

# save test fchk file with new coefficients
open('test_f2.fchk', 'w').write(build_fchk(electronic_structure_f2))

# get classified orbitals
orbital_type_f2 = get_custom_orbital_classification(electronic_structure_f2,
                                                    center=center_f2,
                                                    orientation=normal_f2)

# range of orbitals to show (1-based orbital numbers)
frontier_orbitals = [12, 13, 14, 15, 16, 17, 18, 19, 20]

# Print results in table
print('Inversion center?')
print('index fragment 1 fragment 2')
for i in frontier_orbitals:
    print(' {:4} {:4} {:4.3f} {:4} {:4.3f}'.format(i,
                                                   orbital_type_f1[i-1][0], orbital_type_f1[i-1][1],
                                                   orbital_type_f2[i-1][0], orbital_type_f2[i-1][1]))
|
[
"abelcarreras83@gmail.com"
] |
abelcarreras83@gmail.com
|
cc7027ec8852029b29739182b583c126f29a16cf
|
4ab57a7bd592d267d180f0541ee18b4c544eec28
|
/tests/orm/mixins/test_soft_deletes.py
|
bba8c0e6fcda1c345551add5b8b6bc09638e0e42
|
[
"MIT"
] |
permissive
|
mattcl/orator
|
f6cfb687ef8f1c3f5dd9828b2b950edbb5387cc9
|
cc3d2154d596f7e6ff4274d7f8d6e8a233e12a9c
|
refs/heads/0.8
| 2021-01-20T17:27:16.342669
| 2016-06-02T21:55:00
| 2016-06-02T21:55:00
| 66,998,160
| 0
| 1
| null | 2018-02-22T21:29:24
| 2016-08-31T03:08:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
# -*- coding: utf-8 -*-
import datetime
import arrow
from flexmock import flexmock, flexmock_teardown
from orator import Model, SoftDeletes
from orator.orm import Builder
from orator.query import QueryBuilder
from ... import OratorTestCase
t = arrow.get().naive
class SoftDeletesTestCase(OratorTestCase):
    """Tests for the SoftDeletes mixin, using flexmock expectations."""

    def tearDown(self):
        # Verify and clear all flexmock expectations after each test.
        flexmock_teardown()

    def test_delete_sets_soft_deleted_column(self):
        """delete() must UPDATE deleted_at instead of issuing a DELETE."""
        model = flexmock(SoftDeleteModelStub())
        model.set_exists(True)
        builder = flexmock(Builder)
        query_builder = flexmock(QueryBuilder(None, None, None))
        query = Builder(query_builder)
        model.should_receive('new_query').and_return(query)
        # Expect an update keyed by the primary key, setting the frozen timestamp.
        builder.should_receive('where').once().with_args('id', 1).and_return(query)
        builder.should_receive('update').once().with_args({'deleted_at': t})
        model.delete()
        self.assertIsInstance(model.deleted_at, datetime.datetime)

    def test_restore(self):
        """restore() must clear deleted_at and persist the model."""
        model = flexmock(SoftDeleteModelStub())
        model.set_exists(True)
        model.should_receive('save').once()
        model.restore()
        self.assertIsNone(model.deleted_at)
class SoftDeleteModelStub(SoftDeletes, Model):
    """Minimal soft-deletable model: fixed primary key, frozen timestamp."""

    def get_key(self):
        # Hard-coded primary key value used by the expectations above.
        return 1

    def get_key_name(self):
        # Column name of the primary key.
        return 'id'

    def from_datetime(self, value):
        # Always return the module-level frozen timestamp for determinism.
        return t
|
[
"sebastien.eustace@gmail.com"
] |
sebastien.eustace@gmail.com
|
72d8d8ac47ceb9f682800efef9aa102bd121eab5
|
caa06eca3eef2549d5088f6487201f734b35822e
|
/NLP-PGN/utils/config_bak.py
|
538451b3b9977af8e798069ab9f3e4cf5672d3bb
|
[] |
no_license
|
kelvincjr/shared
|
f947353d13e27530ba44ea664e27de51db71a5b6
|
4bc4a12b0ab44c6847a67cbd7639ce3c025f38f8
|
refs/heads/master
| 2023-06-23T19:38:14.801083
| 2022-05-17T09:45:22
| 2022-05-17T09:45:22
| 141,774,490
| 6
| 1
| null | 2023-06-12T21:30:07
| 2018-07-21T02:22:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
"""
@Time : 2021/2/814:06
@Auth : 周俊贤
@File :config.py
@DESCRIPTION:
"""
from typing import Optional
import torch
# General
hidden_size: int = 512
dec_hidden_size: Optional[int] = 512
embed_size: int = 512
pointer = True
# Data
max_vocab_size = 20000
embed_file: Optional[str] = None # use pre-trained embeddings
source = 'big_samples' # use value: train or big_samples
data_path: str = './data/data/train.txt'
val_data_path = './data/data/dev.txt'
test_data_path = './data/data/test.txt'
stop_word_file = './data/data/HIT_stop_words.txt'
max_src_len: int = 300 # exclusive of special tokens such as EOS
max_tgt_len: int = 100 # exclusive of special tokens such as EOS
truncate_src: bool = True
truncate_tgt: bool = True
min_dec_steps: int = 30
max_dec_steps: int = 100
enc_rnn_dropout: float = 0.5
enc_attn: bool = True
dec_attn: bool = True
dec_in_dropout = 0
dec_rnn_dropout = 0
dec_out_dropout = 0
# Training
trunc_norm_init_std = 1e-4
eps = 1e-31
learning_rate = 0.001
lr_decay = 0.0
initial_accumulator_value = 0.1
epochs = 8
batch_size = 8 #16
coverage = False
fine_tune = False
scheduled_sampling = False
weight_tying = False
max_grad_norm = 2.0
is_cuda = True
DEVICE = torch.device("cuda" if is_cuda else "cpu")
LAMBDA = 1
output_dir = "./output"
if pointer:
if coverage:
if fine_tune:
model_name = 'ft_pgn'
else:
model_name = 'cov_pgn'
elif scheduled_sampling:
model_name = 'ss_pgn'
elif weight_tying:
model_name = 'wt_pgn'
else:
if source == 'big_samples':
model_name = 'pgn_big_samples'
else:
model_name = 'pgn'
else:
model_name = 'baseline'
# Beam search
beam_size: int = 3
alpha = 0.2
beta = 0.2
gamma = 0.6
|
[
"deco_2004@163.com"
] |
deco_2004@163.com
|
01e664b7f39575e1a63a4ddf8b5dfefab7300952
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Python modules/SettlementRegenerate.py
|
f5f2ad83e3c14991b4f4775c28b52ee0110c5cdb
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,110
|
py
|
"""----------------------------------------------------------------------------------------------------------
MODULE : SettlementRegenerate
PURPOSE : This module will regenerate a settlement.
DEPARTMENT AND DESK : IT
REQUASTER : Heinrich Cronje
DEVELOPER : Heinrich Cronje
CR NUMBER :
-------------------------------------------------------------------------------------------------------------
HISTORY
=============================================================================================================
Date Change no Developer Description
-------------------------------------------------------------------------------------------------------------
2011-08-22 Heinrich Cronje Front Arena Upgrade 2010.2.
2019-07-24 FAU-312 Cuen Edwards Replaced custom regenerate functionality with
call to Front Arena command. Added security
on menu item.
-------------------------------------------------------------------------------------------------------------
"""
import acm
from at_logging import getLogger
import FUxCore
LOGGER = getLogger(__name__)
def _confirm_regenerate(shell, settlements):
    """
    Prompt the user to confirm regeneration of the currently selected
    settlements.  Returns True when the user confirms.
    """
    count = settlements.Size()
    # Keep the original wording exactly, including the (odd) empty-selection
    # case where no suffix is appended.
    suffix = ""
    if count == 1:
        suffix = "selected settlement."
    elif count > 1:
        suffix = "{number} selected settlements.".format(number=count)
    message = ("The command Regenerate will be executed on the "
               + suffix
               + "\n\nDo you want to continue?")
    answer = acm.UX.Dialogs().MessageBoxYesNo(shell, 'Question', message)
    return answer == 'Button1'
def _regenerate(settlements):
    """
    Regenerate the specified settlements.

    Returns a dict mapping each settlement that failed to regenerate to
    the exception raised; an empty dict means every settlement succeeded.
    """
    failures = {}
    for settlement in settlements:
        try:
            command = acm.FRegeneratePayment(settlement)
            command.Execute()
            command.CommitResult()
            LOGGER.info('Regenerated settlement {oid}.'.format(
                oid=settlement.Oid()
            ))
        except Exception as exception:
            # Keep going so one bad settlement does not block the rest;
            # failures are reported to the user afterwards.
            failures[settlement] = exception
            LOGGER.warn('Failed to regenerate settlement {oid}.'.format(
                oid=settlement.Oid()
            ))
    return failures
def _display_failures(shell, failures):
    """
    Display a list of settlements that failed to regenerate along
    with the associated exceptions, ordered by settlement oid.
    """
    ordered = sorted(failures.keys(), key=lambda settlement: settlement.Oid())
    lines = ["The following settlements failed to regenerate:\n"]
    for settlement in ordered:
        lines.append("\n- {oid} - {exception}".format(
            oid=settlement.Oid(),
            exception=failures[settlement]
        ))
    acm.UX.Dialogs().MessageBoxOKCancel(shell, 'Warning', "".join(lines))
class MenuItem(FUxCore.MenuItem):
    """
    Menu item that exposes the 'Regenerate Payment' command, restricted
    to users holding all of the required settlement operations.
    """

    # Operations a user must be allowed in order to see/use the command,
    # checked in this order.
    _REQUIRED_OPERATIONS = (
        'Authorise Settlement',
        'Edit Settlements',
        'Regenerate Settlement',
    )

    def __init__(self, extension_object):
        """
        Constructor.
        """
        pass

    @FUxCore.aux_cb
    def Invoke(self, eii):
        """
        Regenerate the selected settlements after user confirmation,
        reporting any failures.
        """
        if not self._user_has_access():
            return
        shell = eii.Parameter('shell')
        settlements = eii.ExtensionObject()
        if not _confirm_regenerate(shell, settlements):
            return
        failures = _regenerate(settlements)
        if failures:
            _display_failures(shell, failures)

    @FUxCore.aux_cb
    def Applicable(self):
        """
        Show the menu item only to authorised users.
        """
        return self._user_has_access()

    @FUxCore.aux_cb
    def Enabled(self):
        """
        Enable (vs grey out) the menu item only for authorised users.
        """
        return self._user_has_access()

    @FUxCore.aux_cb
    def Checked(self):
        """
        The menu item never carries a check mark.
        """
        return False

    @staticmethod
    def _user_has_access():
        """
        True when the user holds every operation required for the command.
        """
        return all(acm.User().IsAllowed(operation, 'Operation')
                   for operation in MenuItem._REQUIRED_OPERATIONS)
@FUxCore.aux_cb
def create_menu_item(extension_object):
    """
    Function used to create and return the menu item.

    This function is referenced from the 'Regenerate Payment'
    FMenuExtension.
    """
    return MenuItem(extension_object)
|
[
"nencho.georogiev@absa.africa"
] |
nencho.georogiev@absa.africa
|
4e5832c5c5b4c8807e7bcdabe9568f504fedc426
|
67b8ea7f463e76a74d5144202952e6c8c26a9b75
|
/cluster-env/bin/undill
|
455df88dec71f58462d5ce48b4a02c95dad99b63
|
[
"MIT"
] |
permissive
|
democrazyx/elecsim
|
5418cb99962d7ee2f9f0510eb42d27cd5254d023
|
e5b8410dce3cb5fa2e869f34998dfab13161bc54
|
refs/heads/master
| 2023-06-27T09:31:01.059084
| 2021-07-24T19:39:36
| 2021-07-24T19:39:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
#!/home/alexkell/elecsim/cluster-env/bin/python3
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# Copyright (c) 2016-2019 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/dill/blob/master/LICENSE
"""
unpickle the contents of a pickled object file
Examples::
$ undill hello.pkl
['hello', 'world']
"""
if __name__ == '__main__':
    import sys
    import dill
    for file in sys.argv[1:]:
        # Use a context manager so each file handle is closed promptly
        # (the original leaked the handle returned by open()).
        with open(file, 'rb') as stream:
            print(dill.load(stream))
|
[
"alexander@kell.es"
] |
alexander@kell.es
|
|
63655078077b3e9d9b986b5bf295f5aae86a05c0
|
7b26ead5cca82bc8ec8cec01505435db06959284
|
/spider.py
|
8ecd93d5e9b67fa07028d64a1ee83625d4721952
|
[] |
no_license
|
mnahm5/Web-Crawler
|
dffa8725f56a1c4c9265c120b9ac5500a497bff3
|
552ca54fd13e4fc30e1315b6a22fb511d2aaf345
|
refs/heads/master
| 2021-01-19T00:52:17.912824
| 2016-06-30T12:45:25
| 2016-06-30T12:45:25
| 60,887,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,375
|
py
|
from urllib.request import urlopen
from link_finder import LinkFinder
from general import *
class Spider:
# Class variables (shared among all instances)
project_name = ""
base_url = ""
domain_name = ""
queue_file = ""
crawled_file = ""
queue = set()
crawled = set()
def __init__(self, project_name, base_url, domain_name):
Spider.project_name = project_name
Spider.base_url = base_url
Spider.domain_name = domain_name
Spider.queue_file = Spider.project_name + "/queue.txt"
Spider.crawled_file = Spider.project_name + "/crawled.txt"
self.boot()
self.crawl_page("First Spider", Spider.base_url)
@staticmethod
def boot():
create_project_dir(Spider.project_name)
create_data_files(Spider.project_name, Spider.base_url)
Spider.queue = file_to_set(Spider.queue_file)
Spider.crawled = file_to_set(Spider.crawled_file)
@staticmethod
def crawl_page(thread_name, page_url):
if page_url not in Spider.crawled:
print(thread_name + ' now crawling ' + page_url)
print("Queue " + str(len(Spider.queue)) + " | Crawled " + str(len(Spider.crawled)))
Spider.add_links_to_queue(Spider.gather_links(page_url))
Spider.queue.remove(page_url)
Spider.crawled.add(page_url)
Spider.update_files()
@staticmethod
def gather_links(page_url):
html_string = ""
try:
response = urlopen(page_url)
if response.getheader("Content-Type") == "text/html":
html_bytes = response.read()
html_string = html_bytes.decode("utf-8")
finder = LinkFinder(Spider.base_url, page_url)
finder.feed(html_string)
except:
print("Error: cannot crawl page")
return set()
return finder.page_links()
@staticmethod
def add_links_to_queue(links):
for url in links:
if url in Spider.queue:
continue
if url in Spider.crawled:
continue
if Spider.domain_name not in url:
continue
Spider.queue.add(url)
@staticmethod
def update_files():
set_to_file(Spider.queue, Spider.queue_file)
set_to_file(Spider.crawled, Spider.crawled_file)
|
[
"ahmed.nadim59@gmail.com"
] |
ahmed.nadim59@gmail.com
|
1c2e6fc89feaef8003cf91c6e3db19398008dde5
|
fa5070498f31026b662053d1d5d91282cb1f68b9
|
/test01/tapp/views.py
|
36f742b4db34a7c1ddbfc041c48b401998010c04
|
[
"Apache-2.0"
] |
permissive
|
jinguangzhu/the_first_python
|
f074c4943028421f96285a2f772e7ccf102248e5
|
d9d035b44652a4cd6ecd1834dd9930d1c78bf360
|
refs/heads/master
| 2020-03-19T14:16:35.860167
| 2018-06-19T13:58:25
| 2018-06-19T13:58:25
| 136,615,947
| 0
| 3
|
Apache-2.0
| 2018-06-19T13:31:00
| 2018-06-08T12:27:09
|
Python
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
from django.shortcuts import render
from tapp.models import *
# Create your views here.
def my_student(req):
student = Student.objects.all()
return render(req,"student.html",context={"student":student})
def first(req):
return render(req,"hellodjango.html")
|
[
"ubuntu@localhost.localdomain"
] |
ubuntu@localhost.localdomain
|
79d9d6cab1424a8f758f9bc427220aa90cc5ea9a
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/40dea17339b24a50970c59a9ab7f2661.py
|
2b91b02b1c565cfce002da2177d0c653c3e0759a
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
#
# Skeleton file for the Python "Bob" exercise.
#
def hey(phrase):
if not phrase.strip() == '':
if any(c.isalpha() for c in phrase) and not any(
c.islower() for c in phrase):
return 'Whoa, chill out!'
elif phrase.endswith('?'):
return 'Sure.'
return 'Whatever.'
return 'Fine. Be that way!'
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
5455dbfa3f6bdb95fbe0d82fe40400246f03ff85
|
d5beb80c402954d1b66f765b5d5c93d28491324d
|
/evtstrd_test/filter.py
|
854a71c52f33622147a05d857a11213a9abf3fb8
|
[
"MIT"
] |
permissive
|
srittau/eventstreamd
|
978d822d7dd504f91aebdf11091733a04bb4c5c2
|
688ee94aea704230e2d0693195062cea8ba3eb73
|
refs/heads/main
| 2023-08-18T21:27:23.962517
| 2023-08-17T09:55:24
| 2023-08-17T09:55:24
| 85,480,241
| 0
| 0
|
MIT
| 2023-09-08T06:45:51
| 2017-03-19T14:00:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
from unittest import TestCase
from asserts import assert_equal, assert_false, assert_raises, assert_true
from evtstrd.filters import parse_filter
class FilterTest(TestCase):
def test_str(self) -> None:
filter_ = parse_filter("foo.bar<='ABC'")
assert_equal("foo.bar<='ABC'", str(filter_))
def test_string_filter__path_not_found(self) -> None:
filter_ = parse_filter("foo.bar<='ABC'")
assert_false(filter_({"foo": {}}))
def test_string_filter__wrong_type(self) -> None:
filter_ = parse_filter("foo.bar<='50'")
assert_false(filter_({"foo": {"bar": 13}}))
def test_string_filter__compare(self) -> None:
filter_ = parse_filter("foo.bar<='ABC'")
assert_true(filter_({"foo": {"bar": "AAA"}}))
assert_true(filter_({"foo": {"bar": "ABC"}}))
assert_false(filter_({"foo": {"bar": "CAA"}}))
def test_string_filter__lt(self) -> None:
filter_ = parse_filter("foo.bar<'ABC'")
assert_true(filter_({"foo": {"bar": "AAA"}}))
assert_false(filter_({"foo": {"bar": "ABC"}}))
assert_false(filter_({"foo": {"bar": "CAA"}}))
def test_string_filter__gt(self) -> None:
filter_ = parse_filter("foo.bar>'ABC'")
assert_false(filter_({"foo": {"bar": "AAA"}}))
assert_false(filter_({"foo": {"bar": "ABC"}}))
assert_true(filter_({"foo": {"bar": "CAA"}}))
class ParseFilterTest(TestCase):
def test_invalid_filter(self) -> None:
with assert_raises(ValueError):
parse_filter("INVALID")
def test_invalid_values(self) -> None:
with assert_raises(ValueError):
parse_filter("foo=bar")
with assert_raises(ValueError):
parse_filter("foo='bar")
with assert_raises(ValueError):
parse_filter("foo='")
with assert_raises(ValueError):
parse_filter("foo=2000-12-32")
def test_no_such_field(self) -> None:
f = parse_filter("foo<=10")
assert_false(f({}))
def test_wrong_type(self) -> None:
f = parse_filter("foo<=10")
assert_false(f({"foo": ""}))
def test_eq_int(self) -> None:
f = parse_filter("foo=10")
assert_false(f({"foo": 9}))
assert_true(f({"foo": 10}))
assert_false(f({"foo": 11}))
def test_le_int(self) -> None:
f = parse_filter("foo<=10")
assert_true(f({"foo": 9}))
assert_true(f({"foo": 10}))
assert_false(f({"foo": 11}))
def test_ge_int(self) -> None:
f = parse_filter("foo>=10")
assert_false(f({"foo": 9}))
assert_true(f({"foo": 10}))
assert_true(f({"foo": 11}))
def test_eq_str(self) -> None:
f = parse_filter("foo='bar'")
assert_false(f({"foo": "baz"}))
assert_true(f({"foo": "bar"}))
def test_eq_date(self) -> None:
f = parse_filter("foo=2016-03-24")
assert_false(f({"foo": "2000-01-01"}))
assert_true(f({"foo": "2016-03-24"}))
def test_nested_value(self) -> None:
f = parse_filter("foo.bar<=10")
assert_true(f({"foo": {"bar": 10}}))
|
[
"srittau@rittau.biz"
] |
srittau@rittau.biz
|
3dedf611bc54472811b3f467db4eb932c8506bf7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03971/s127085901.py
|
34b5ed2240205dc9db4f33c7ab3dd930dddf6d8a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
n,a,b=map(int,input().split())
s=input()
passed=0
abroad_passed=0
for i in s:
if i=="a":
if passed<a+b:
print("Yes")
passed+=1
else:
print("No")
elif i=="b":
if passed<a+b and abroad_passed<=b-1:
print("Yes")
passed+=1
abroad_passed+=1
else:
print("No")
else:
print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
0f8ebdd234606243284b482e06e4083e1328c38d
|
3ef0fd7ff4ab98da91de28b4a3ae6bbd55a38361
|
/wxrobot-host/wxrobot/baidu_shibie.py
|
7c2115361e6ee26f68503dd32ffd03c7d4f6470f
|
[] |
no_license
|
nudepig/wxrobot
|
d0cbcbe0b1fb0a69532bb2c45630bc01ded8c2af
|
82bd8f68d3163d8dddf1b9a8ccc14532f040fbab
|
refs/heads/master
| 2020-12-27T13:35:24.043856
| 2020-02-03T09:30:04
| 2020-02-03T09:30:04
| 237,920,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,770
|
py
|
# from aip import AipOcr # 如果已安装pip,执行pip install baidu-aip即可
# import os
# """ 你的 APPID AK SK """
# APP_ID = '16802142'
# API_KEY = 'FcIxTPz25FZOSjOfgTKfAWIn'
# SECRET_KEY = 'GKIvG4tFqqyzisDCY81ASkMihg3LHrwx'
#
# client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
# """ 读取图片 """
# def get_file_content(filePath): # 读取图片
# with open(filePath, 'rb') as fp:
# return fp.read()
#
# def image_identify(picture):
# image = get_file_content(picture)
# # print(image)
# # time_one = time.time()
# result = client.basicAccurate(image) # 获取百度识别的结果
# # time_two = time.time()
# # print(time_two - time_one)
# # if time_two - time_one > 6:
# # else:
# if os.path.exists('result.txt'):
# os.remove('result.txt')
# for result_words in list(result['words_result']): # 提取返回结果
# with open('result.txt', 'a+', encoding='utf-8') as file:
# file.write(result_words['words'] + '\n')
# with open('result.txt', 'r', encoding='utf-8') as file:
# result_input = file.read()
# return result_input # 返回识别的文字结果,文字分行
#
# picture = r'f43a9ae3508254911d9b551d3b0a2d5.png'
# image_identify(picture)
# encoding:utf-8
# 旧版api
import requests
import base64
import os
'''
通用文字识别(高精度版)
'''
def image_identify(picture):
# client_id 为官网获取的AK, client_secret 为官网获取的SK
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=v6ChGHmbOGNu5yyP1bchGYmF&client_secret=RSLGkQm44tYEti0m7dfg2GGgAibFKkZ2'
access_token = requests.get(host)
request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic"
# 二进制方式打开图片文件
f = open(picture, 'rb')
img = base64.b64encode(f.read())
access_token = access_token.json()
access_token = access_token['access_token']
params = {"image": img}
# access_token = '[调用鉴权接口获取的token]'
request_url = '{}?access_token={}'.format(request_url, access_token)
headers = {'content-type': 'application/x-www-form-urlencoded'}
response = requests.post(request_url, data=params, headers=headers)
response = response.json()
if os.path.exists('result.txt'):
os.remove('result.txt')
for result_words in list(response['words_result']): # 提取返回结果
with open('result.txt', 'a+', encoding='utf-8') as file:
file.write(result_words['words'] + '\n')
with open('result.txt', 'r', encoding='utf-8') as file:
result_input = file.read()
return result_input
|
[
"ubuntu@localhost.localdomain"
] |
ubuntu@localhost.localdomain
|
526a36db003f6b888927cfb7031603fc97188b7a
|
2c143ba64032f65c7f7bf1cbd567a1dcf13d5bb1
|
/整数转罗马数字.py
|
2a08f7032b28b37c736f253256397e561ff86593
|
[] |
no_license
|
tx991020/MyLeetcode
|
5b6121d32260fb30b12cc8146e44e6c6da03ad89
|
cfe4f087dfeb258caebbc29fc366570ac170a68c
|
refs/heads/master
| 2020-04-09T21:43:41.403553
| 2019-03-27T18:54:35
| 2019-03-27T18:54:35
| 160,611,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
'''
罗马数字包含以下七种字符: I, V, X, L,C,D 和 M。
字符 数值
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
例如, 罗马数字 2 写做 II ,即为两个并列的 1。12 写做 XII ,即为 X + II 。 27 写做 XXVII, 即为 XX + V + II 。
通常情况下,罗马数字中小的数字在大的数字的右边。但也存在特例,例如 4 不写做 IIII,而是 IV。数字 1 在数字 5 的左边,所表示的数等于大数 5 减小数 1 得到的数值 4 。同样地,数字 9 表示为 IX。这个特殊的规则只适用于以下六种情况:
I 可以放在 V (5) 和 X (10) 的左边,来表示 4 和 9。
X 可以放在 L (50) 和 C (100) 的左边,来表示 40 和 90。
C 可以放在 D (500) 和 M (1000) 的左边,来表示 400 和 900。
给定一个整数,将其转为罗马数字。输入确保在 1 到 3999 的范围内。
示例 1:
输入: 3
输出: "III"
示例 2:
输入: 4
输出: "IV"
示例 3:
输入: 9
输出: "IX"
'''
class Solution:
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
|
[
"wudi@hetao101.com"
] |
wudi@hetao101.com
|
5ee73a5d7a4d21e8ff1b542b13b9828616bbdac6
|
e02dbefe9f362c3e9b2849c1e22c0ab27e010164
|
/이것이 코딩 테스트다 - 연습문제/19. 1로 만들기.py
|
18fba447785ec8766aa41a48e8bf4090b6b8e8c1
|
[] |
no_license
|
hoyeoon/CodingTest
|
ac77574539a7a96cbdb64eb1768ba20ab6ad3b4f
|
4d34b422f0dc85f3d506a6c997f3fa883b7162ab
|
refs/heads/master
| 2023-06-05T17:43:38.348537
| 2021-06-28T10:05:22
| 2021-06-28T10:05:22
| 378,081,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
d = [0] * 30000
d[1], d[2], d[3], d[4], d[5] = 0, 1, 1, 2, 1
x = int(input())
for i in range(6, x + 1):
if i % 2 == 0 and i % 3 == 0 and i % 5 == 0:
d[i] = min(d[i - 1], d[i // 2], d[i // 3], d[i // 5]) + 1
elif i % 2 == 0 and i % 3 == 0:
d[i] = min(d[i - 1], d[i // 2], d[i // 3]) + 1
elif i % 2 == 0 and i % 5 == 0:
d[i] = min(d[i - 1], d[i // 2], d[i // 5]) + 1
elif i % 3 == 0 and i % 5 == 0:
d[i] = min(d[i - 1], d[i // 3], d[i // 5]) + 1
elif i % 5 == 0:
d[i] = min(d[i - 1], d[i // 5]) + 1
elif i % 3 == 0:
d[i] = min(d[i - 1], d[i // 3]) + 1
elif i % 2 == 0:
d[i] = min(d[i - 1], d[i // 2]) + 1
else:
d[i] = d[i - 1] + 1
print(d[x])
|
[
"chy1995@ajou.ac.kr"
] |
chy1995@ajou.ac.kr
|
54ceb5d460c811307a4e5e8a7f54e6b990c302b3
|
0fbd56d4a2ee512cb47f557bea310618249a3d2e
|
/official/vision/image_classification/configs/base_configs.py
|
efdcdc0b4327871dd04a854f057cbcdf84a9db9e
|
[
"Apache-2.0"
] |
permissive
|
joppemassant/models
|
9968f74f5c48096f3b2a65e6864f84c0181465bb
|
b2a6712cbe6eb9a8639f01906e187fa265f3f48e
|
refs/heads/master
| 2022-12-10T01:29:31.653430
| 2020-09-11T11:26:59
| 2020-09-11T11:26:59
| 294,675,920
| 1
| 1
|
Apache-2.0
| 2020-09-11T11:21:51
| 2020-09-11T11:21:51
| null |
UTF-8
|
Python
| false
| false
| 7,936
|
py
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definitions for high level configuration groups.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, List, Mapping, Optional
import dataclasses
from official.modeling import hyperparams
from official.modeling.hyperparams import config_definitions
CallbacksConfig = config_definitions.CallbacksConfig
TensorboardConfig = config_definitions.TensorboardConfig
RuntimeConfig = config_definitions.RuntimeConfig
@dataclasses.dataclass
class ExportConfig(hyperparams.Config):
"""Configuration for exports.
Attributes:
checkpoint: the path to the checkpoint to export.
destination: the path to where the checkpoint should be exported.
"""
checkpoint: str = None
destination: str = None
@dataclasses.dataclass
class MetricsConfig(hyperparams.Config):
"""Configuration for Metrics.
Attributes:
accuracy: Whether or not to track accuracy as a Callback. Defaults to None.
top_5: Whether or not to track top_5_accuracy as a Callback. Defaults to
None.
"""
accuracy: bool = None
top_5: bool = None
@dataclasses.dataclass
class TimeHistoryConfig(hyperparams.Config):
"""Configuration for the TimeHistory callback.
Attributes:
log_steps: Interval of steps between logging of batch level stats.
"""
log_steps: int = None
@dataclasses.dataclass
class TrainConfig(hyperparams.Config):
"""Configuration for training.
Attributes:
resume_checkpoint: Whether or not to enable load checkpoint loading.
Defaults to None.
epochs: The number of training epochs to run. Defaults to None.
steps: The number of steps to run per epoch. If None, then this will be
inferred based on the number of images and batch size. Defaults to None.
callbacks: An instance of CallbacksConfig.
metrics: An instance of MetricsConfig.
tensorboard: An instance of TensorboardConfig.
set_epoch_loop: Whether or not to set `experimental_steps_per_execution` to
equal the number of training steps in `model.compile`. This reduces the
number of callbacks run per epoch which significantly improves end-to-end
TPU training time.
"""
resume_checkpoint: bool = None
epochs: int = None
steps: int = None
callbacks: CallbacksConfig = CallbacksConfig()
metrics: MetricsConfig = None
tensorboard: TensorboardConfig = TensorboardConfig()
time_history: TimeHistoryConfig = TimeHistoryConfig()
set_epoch_loop: bool = False
@dataclasses.dataclass
class EvalConfig(hyperparams.Config):
"""Configuration for evaluation.
Attributes:
epochs_between_evals: The number of train epochs to run between evaluations.
Defaults to None.
steps: The number of eval steps to run during evaluation. If None, this will
be inferred based on the number of images and batch size. Defaults to
None.
skip_eval: Whether or not to skip evaluation.
"""
epochs_between_evals: int = None
steps: int = None
skip_eval: bool = False
@dataclasses.dataclass
class LossConfig(hyperparams.Config):
"""Configuration for Loss.
Attributes:
name: The name of the loss. Defaults to None.
label_smoothing: Whether or not to apply label smoothing to the loss. This
only applies to 'categorical_cross_entropy'.
"""
name: str = None
label_smoothing: float = None
@dataclasses.dataclass
class OptimizerConfig(hyperparams.Config):
"""Configuration for Optimizers.
Attributes:
name: The name of the optimizer. Defaults to None.
decay: Decay or rho, discounting factor for gradient. Defaults to None.
epsilon: Small value used to avoid 0 denominator. Defaults to None.
momentum: Plain momentum constant. Defaults to None.
nesterov: Whether or not to apply Nesterov momentum. Defaults to None.
moving_average_decay: The amount of decay to apply. If 0 or None, then
exponential moving average is not used. Defaults to None.
lookahead: Whether or not to apply the lookahead optimizer. Defaults to
None.
beta_1: The exponential decay rate for the 1st moment estimates. Used in the
Adam optimizers. Defaults to None.
beta_2: The exponential decay rate for the 2nd moment estimates. Used in the
Adam optimizers. Defaults to None.
epsilon: Small value used to avoid 0 denominator. Defaults to 1e-7.
"""
name: str = None
decay: float = None
epsilon: float = None
momentum: float = None
nesterov: bool = None
moving_average_decay: Optional[float] = None
lookahead: Optional[bool] = None
beta_1: float = None
beta_2: float = None
epsilon: float = None
@dataclasses.dataclass
class LearningRateConfig(hyperparams.Config):
"""Configuration for learning rates.
Attributes:
name: The name of the learning rate. Defaults to None.
initial_lr: The initial learning rate. Defaults to None.
decay_epochs: The number of decay epochs. Defaults to None.
decay_rate: The rate of decay. Defaults to None.
warmup_epochs: The number of warmup epochs. Defaults to None.
batch_lr_multiplier: The multiplier to apply to the base learning rate, if
necessary. Defaults to None.
examples_per_epoch: the number of examples in a single epoch. Defaults to
None.
boundaries: boundaries used in piecewise constant decay with warmup.
multipliers: multipliers used in piecewise constant decay with warmup.
scale_by_batch_size: Scale the learning rate by a fraction of the batch
size. Set to 0 for no scaling (default).
staircase: Apply exponential decay at discrete values instead of continuous.
"""
name: str = None
initial_lr: float = None
decay_epochs: float = None
decay_rate: float = None
warmup_epochs: int = None
examples_per_epoch: int = None
boundaries: List[int] = None
multipliers: List[float] = None
scale_by_batch_size: float = 0.
staircase: bool = None
@dataclasses.dataclass
class ModelConfig(hyperparams.Config):
"""Configuration for Models.
Attributes:
name: The name of the model. Defaults to None.
model_params: The parameters used to create the model. Defaults to None.
num_classes: The number of classes in the model. Defaults to None.
loss: A `LossConfig` instance. Defaults to None.
optimizer: An `OptimizerConfig` instance. Defaults to None.
"""
name: str = None
model_params: hyperparams.Config = None
num_classes: int = None
loss: LossConfig = None
optimizer: OptimizerConfig = None
@dataclasses.dataclass
class ExperimentConfig(hyperparams.Config):
"""Base configuration for an image classification experiment.
Attributes:
model_dir: The directory to use when running an experiment.
mode: e.g. 'train_and_eval', 'export'
runtime: A `RuntimeConfig` instance.
train: A `TrainConfig` instance.
evaluation: An `EvalConfig` instance.
model: A `ModelConfig` instance.
export: An `ExportConfig` instance.
"""
model_dir: str = None
model_name: str = None
mode: str = None
runtime: RuntimeConfig = None
train_dataset: Any = None
validation_dataset: Any = None
train: TrainConfig = None
evaluation: EvalConfig = None
model: ModelConfig = None
export: ExportConfig = None
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
55e3fff99a6a53657ed6aa3797ba4ebd66cd1a7a
|
be84495751737bbf0a8b7d8db2fb737cbd9c297c
|
/renlight/tests/renderer/test_sampler.py
|
b751d8606bc7eb66c363b055ea2f3a538bd86591
|
[] |
no_license
|
mario007/renmas
|
5e38ff66cffb27b3edc59e95b7cf88906ccc03c9
|
bfb4e1defc88eb514e58bdff7082d722fc885e64
|
refs/heads/master
| 2021-01-10T21:29:35.019792
| 2014-08-17T19:11:51
| 2014-08-17T19:11:51
| 1,688,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,747
|
py
|
import unittest
from tdasm import Runtime
from renlight.sdl.shader import Shader
from renlight.sdl import FloatArg, IntArg
from renlight.renderer.sampler import Sampler
class SamplerTest(unittest.TestCase):
def test_sampler(self):
sam = Sampler()
sam.set_resolution(2, 2)
sam.load('regular')
sam.compile()
runtimes = [Runtime()]
sam.prepare(runtimes)
code = """
sample = Sample()
r1 = generate_sample(sample)
p1 = sample.x
p2 = sample.y
p3 = sample.ix
p4 = sample.iy
"""
p1 = FloatArg('p1', 566.6)
p2 = FloatArg('p2', 566.6)
p3 = IntArg('p3', 5655)
p4 = IntArg('p4', 5655)
r1 = IntArg('r1', 5655)
args = [p1, p2, p3, p4, r1]
shader = Shader(code=code, args=args)
shader.compile([sam.shader])
shader.prepare(runtimes)
shader.execute()
self._check_result(shader, -0.5, -0.5, 0, 0, 1)
shader.execute()
self._check_result(shader, 0.5, -0.5, 1, 0, 1)
shader.execute()
self._check_result(shader, -0.5, 0.5, 0, 1, 1)
shader.execute()
self._check_result(shader, 0.5, 0.5, 1, 1, 1)
shader.execute()
ret = shader.get_value('r1')
self.assertEqual(ret, 0)
def _check_result(self, shader, p1, p2, p3, p4, r1):
t1 = shader.get_value('p1')
self.assertEqual(t1, p1)
t2 = shader.get_value('p2')
self.assertEqual(t2, p2)
t3 = shader.get_value('p3')
self.assertAlmostEqual(t3, p3)
t4 = shader.get_value('p4')
self.assertAlmostEqual(t4, p4)
k1 = shader.get_value('r1')
self.assertEqual(k1, r1)
if __name__ == "__main__":
unittest.main()
|
[
"mvidov@yahoo.com"
] |
mvidov@yahoo.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.