blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4bbfa20fd19cd5b5194cd16b2af4adc22c20a60d | ded13e921c8365c6113911a5834969ec3d33f989 | /082/Remove Duplicates from Sorted List II.py | 2dd27f6dcafbf95d6357d10e5156dc9fc2abbec7 | [] | no_license | ArrayZoneYour/LeetCode | b7b785ef0907640623e5ab8eec1b8b0a9d0024d8 | d09f56d4fef859ca4749dc753d869828f5de901f | refs/heads/master | 2021-04-26T23:03:10.026205 | 2018-05-09T15:49:08 | 2018-05-09T15:49:08 | 123,922,098 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | py | # /usr/bin/python
# coding: utf-8
# Definition for singly-linked list.
class ListNode:
    """A node of a singly-linked list."""
    def __init__(self, x):
        self.val = x      # payload value stored in this node
        self.next = None  # reference to the next node; None marks the tail
class Solution:
    def deleteDuplicates(self, head):
        """Remove every value that occurs more than once in a sorted list.

        :type head: ListNode
        :rtype: ListNode
        Example: 1->2->3->3->4->4->5 becomes 1->2->5.
        """
        # Lists of length 0 or 1 cannot contain duplicates.
        if head is None or head.next is None:
            return head
        # Sentinel in front of the list so the head can be dropped uniformly.
        sentinel = ListNode(0)
        sentinel.next = head
        prev = sentinel  # last node known to be duplicate-free
        node = head
        while node is not None and node.next is not None:
            if node.next.val == node.val:
                # Splice out every following node carrying the same value...
                while node.next is not None and node.next.val == node.val:
                    node.next = node.next.next
                # ...then unlink `node` itself, since its value was duplicated.
                prev.next = node.next
            else:
                # Current value is unique; advance the duplicate-free frontier.
                prev = prev.next
            node = node.next
        return sentinel.next
# Ad-hoc smoke test: build the list 1->1->2->2 (every value duplicated,
# so the expected result is an empty list / None head).
node1 = ListNode(1)
node2 = ListNode(1)
node3 = ListNode(2)
node4 = ListNode(2)
node1.next = node2
node2.next = node3
node3.next = node4
# Result is not inspected; this only exercises the code path.
Solution().deleteDuplicates(node1)
print() | [
"hustliyidong@gmail.com"
] | hustliyidong@gmail.com |
1ae02112a6b34c47b25fc53f5eeae25ccc13eca9 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/071_Get_Movie_Summary/GetMovieSummary.py | a84b1602f2c2ed7a0ca49bbc1cb1d69b1c6bb938 | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # GetMovieSummary.py
# Simple app using TheMovieDb.org to retrieve movie information
# pip install tmdbsimple
#
# 2020-07-15 PV
# 2023-01-03 PV Added .mp4 suffix
import os
import tmdbsimple as tmdb # type: ignore
from typing import Iterable
# Chemin complet de tous les fichiers à partir d'une racine
def get_all_files(path: str) -> Iterable[str]:
    """Recursively yield the full path of every file under *path*."""
    for dirpath, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            yield os.path.join(dirpath, filename)
# TMDb API key used by all queries (hard-coded; consider moving to config).
tmdb.API_KEY = 'ecdd67089c844d17e9f72a053609ed9f'
search = tmdb.Search()
processed = []  # movie titles already looked up in this run
source = r'V:\Films\# A_Trier'  # root folder of films to annotate
# For every video file under `source`, query TMDb once per distinct title
# and write the candidate summaries to '<title>.txt' next to the file.
for fullpath in get_all_files(source):
    path, file = os.path.split(fullpath)
    basename, ext = os.path.splitext(file)
    if ext.lower() in ['.mkv', '.avi', '.mp4']:
        # Title is whatever precedes the first ' - ' separator...
        segments = basename.split(' - ')
        title = segments[0]
        # ...with any trailing ' (...)' annotation (e.g. a year) removed.
        s2 = title.split(' (')
        title = s2[0]
        if not title in processed:
            processed.append(title)
            print(title)
            textfile = os.path.join(path, title+'.txt')
            # Skip titles that already have a summary file on disk.
            if not os.path.exists(textfile):
                with open(textfile, mode='w', encoding='utf-8') as out:
                    response = search.movie(query=title)
                    s:dict
                    # Write title / release date / overview for each match.
                    for s in search.results:
                        out.write(s['title']+'\n')
                        out.write(s.get('release_date','')+'\n')
                        out.write(s['overview']+'\n\n')
# #response = search.movie(query='A Few Good Men')
# #response = search.movie(query='The Black Hole')
# response = search.movie(query='La vie de Brian')
# for s in search.results:
# print(s['title'], s['release_date'], s['overview'])
# #print(s['title'], s['id'], s['release_date'], s['popularity'])
| [
"FrenchBear38@outlook.com"
] | FrenchBear38@outlook.com |
93df35b7e5445736b838d299bc73a4b524517d1e | 8ab6330e149fb4bcd303f3ca12b3e10bb08eda3e | /RPA-python/rpa_basic/excel/9_move.py | 92be2254f0d09f796407a904a104807222dc4477 | [] | no_license | jongin1004/python | 73e72b9187a0a707777e1474f5bb48f33a603e8f | 997e6bf59c71943d65447d11729a225b8e323a16 | refs/heads/main | 2023-08-05T02:44:01.673614 | 2023-08-02T07:51:56 | 2023-08-02T07:51:56 | 300,539,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from openpyxl import load_workbook
wb = load_workbook('sample.xlsx')
ws = wb.active
# Columns were: number, English, math -> goal: number, Korean, English, math.
# move_range shifts the values of the range: rows=0 keeps the same row,
# cols=1 moves them one column to the right.
# ws.move_range("B1:C11", rows=0, cols=1)
# ws['B1'].value = '국어'
# Negative offsets move the range in the opposite direction.
ws.move_range("C1:C11", rows=5, cols=-1)
wb.save('sample_modify.xlsx')
wb.close()
| [
"bill1224@naver.com"
] | bill1224@naver.com |
b20d1fbac2a2d855a426c601759521a07a9efb5c | 88620a5d4526493112c157cd7a80b160e794f956 | /testgames.py | 24bae21126ca311af9db144921e21f6d2c8abd0a | [] | no_license | sweettea/python-airmash | 6da7b225642ca188e9a3f0e33895bf91de286c4d | 3640fc96d46cce5360b4a7a866eccabea3616de6 | refs/heads/master | 2021-05-14T18:40:00.661754 | 2018-01-07T02:38:09 | 2018-01-07T02:38:09 | 116,082,157 | 0 | 1 | null | 2018-01-03T02:39:24 | 2018-01-03T02:39:24 | null | UTF-8 | Python | false | false | 441 | py | from airmash import games
# Look up the URL of the 'ffa1' game in the 'eu' region
# (semantics per airmash.games.get_url).
url = games.get_url('eu', 'ffa1')
print("Game URL: {}".format(url))
# Fetch the full games listing and print every game grouped by region.
data = games.get_data()
for region_id in data['regions']:
    region = data['regions'][region_id]
    print('\nRegion: {} ({})'.format(region['name'], region_id))
    for game_id in region['games']:
        game = region['games'][game_id]
        print('{}, {} players - URL: {}'.format(game['name'], game['players'], game['url']))
print('') | [
"phil@gadgetoid.com"
] | phil@gadgetoid.com |
6f5fc2de343a2b0e2c06629972e900ea90911b5c | fda201d7cca34e216a17d97665c8457c72e66cb2 | /voting/tests/factories.py | effa73194a92f887e852dba761788d9759e92dba | [
"Apache-2.0"
] | permissive | SmartElect/SmartElect | 94ab192beb32320e9ae8ae222f90ee531037c1c6 | d6d35f2fa8f60e756ad5247f8f0a5f05830e92f8 | refs/heads/develop | 2020-12-26T04:04:42.753741 | 2019-07-17T17:08:25 | 2019-07-17T17:08:25 | 44,687,036 | 24 | 12 | Apache-2.0 | 2020-06-06T07:16:48 | 2015-10-21T15:47:07 | Python | UTF-8 | Python | false | false | 1,479 | py | import random
from datetime import timedelta
from factory import DjangoModelFactory, SubFactory, Sequence
from factory.declarations import LazyAttribute
from factory.fuzzy import FuzzyDateTime
from django.utils.timezone import now
from voting.models import Ballot, Candidate, Election, RegistrationPeriod
start_dt = now()
class ElectionFactory(DjangoModelFactory):
    """Builds Election instances whose polling window brackets module import
    time (start 1-2 days before, end 2-3 days after `start_dt`)."""
    class Meta:
        model = Election
    name_english = Sequence(lambda n: "Election %d" % n)
    name_arabic = Sequence(lambda n: "Election %d (ar)" % n)
    polling_start_time = FuzzyDateTime(start_dt=start_dt - timedelta(days=2),
                                       end_dt=start_dt - timedelta(days=1))
    polling_end_time = FuzzyDateTime(start_dt=start_dt + timedelta(days=2),
                                     end_dt=start_dt + timedelta(days=3))
class BallotFactory(DjangoModelFactory):
    """Builds Ballot instances with a random (non-deterministic) race type."""
    class Meta:
        model = Ballot
    ballot_type = LazyAttribute(lambda o: random.choice(Ballot.VALID_RACE_TYPES))
    election = SubFactory(ElectionFactory)
    internal_ballot_number = Sequence(int)
class CandidateFactory(DjangoModelFactory):
    """Builds Candidate instances attached to a freshly created Ballot."""
    class Meta:
        model = Candidate
    ballot = SubFactory(BallotFactory)
    name_english = Sequence(lambda n: "Candidate %d" % n)
    name_arabic = Sequence(lambda n: "Candidate %d (ar)" % n)
    candidate_number = Sequence(int)
class RegistrationPeriodFactory(DjangoModelFactory):
    """Builds RegistrationPeriod instances with model-default field values."""
    class Meta:
        model = RegistrationPeriod
| [
"vinod@kurup.com"
] | vinod@kurup.com |
d428a0c2923c810ba7fe622e0a9c3497156c3348 | 2979d177a9388b25a84179127a06728b44955268 | /First-Year/CA117-Labs/Lab3/reversecomp_0311.py | 597ac50e665839a62aba123af4dcd35b3601a859 | [] | no_license | BrendanSimms8898/Python | ccb2b8284aa4e187ab89d4fc34b7fe7a980950cc | 920c39fe02a26e7b131f299d5d082d13021df78b | refs/heads/master | 2023-08-16T00:58:28.742295 | 2021-10-07T12:42:14 | 2021-10-07T12:42:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/env python3
import sys
def revcom(words):
    """Return the words of length >= 5 whose reversal also appears in *words*."""
    matches = []
    for word in words:
        if len(word) >= 5 and word[::-1] in words:
            matches.append(word)
    return matches
def main():
    """Read whitespace-stripped words from stdin and print the matches."""
    words = [word.strip() for word in sys.stdin]
    print(revcom(words))
if __name__ == '__main__':
    main()
| [
"brendan.simms3@mail.dcu.ie"
] | brendan.simms3@mail.dcu.ie |
daf1e49afe7cc6634eb7d0c2bc13eb678c4fa7a3 | 07527179eef5debf8932e6f8ba52742fb078c8ab | /styleguide_example/users/apis.py | 56c2e717e136029ddcef47ba199949afd095e169 | [
"MIT"
] | permissive | brunofvpp/Styleguide-Example | 5389c0ca757c2e3d0e836f3e0b3457bc4ba9960d | 0514a7dd534b1eea2a0baa5e29d05a51ff8bc41c | refs/heads/master | 2023-08-15T04:23:09.642185 | 2021-10-04T09:38:09 | 2021-10-04T09:38:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | from rest_framework.views import APIView
from rest_framework import serializers
from styleguide_example.api.mixins import ApiErrorsMixin
from styleguide_example.api.pagination import get_paginated_response, LimitOffsetPagination
from styleguide_example.users.selectors import user_list
from styleguide_example.users.models import BaseUser
# TODO: When JWT is resolved, add authenticated version
class UserListApi(ApiErrorsMixin, APIView):
    """GET endpoint listing users, with optional id/is_admin/email filters
    and limit/offset pagination."""
    class Pagination(LimitOffsetPagination):
        # Default page size of 1; callers override via the ?limit= parameter.
        default_limit = 1
    class FilterSerializer(serializers.Serializer):
        """Validates the supported query-string filters; all are optional."""
        id = serializers.IntegerField(required=False)
        # Important: If we use BooleanField, it will default to False
        # NOTE(review): NullBooleanField is deprecated/removed in newer DRF;
        # BooleanField(required=False, allow_null=True) is the replacement —
        # confirm against the pinned DRF version before changing.
        is_admin = serializers.NullBooleanField(required=False)
        email = serializers.EmailField(required=False)
    class OutputSerializer(serializers.ModelSerializer):
        """Shape of each user object in the paginated response."""
        class Meta:
            model = BaseUser
            fields = (
                'id',
                'email',
                'is_admin'
            )
    def get(self, request):
        """Return the paginated, filtered list of users."""
        # Make sure the filters are valid, if passed
        filters_serializer = self.FilterSerializer(data=request.query_params)
        filters_serializer.is_valid(raise_exception=True)
        users = user_list(filters=filters_serializer.validated_data)
        return get_paginated_response(
            pagination_class=self.Pagination,
            serializer_class=self.OutputSerializer,
            queryset=users,
            request=request,
            view=self
        )
| [
"radorado@hacksoft.io"
] | radorado@hacksoft.io |
cc62bcc7205e8c86cefdb8329f467b67aa8ad039 | 9545652800884f0e54fe6595d8634c29ea4827a2 | /模拟面试/leetCode_168_串联字符串的最大长度.py | 4d63432852efd76698194bbf3a2a16c37ecacc49 | [] | no_license | challeger/leetCode | 662d9f600a40fd8970568679656f6911a6fdfb05 | d75c35b6f8ab33c158de7fa977ab0b16dac4fc25 | refs/heads/master | 2023-01-13T07:34:42.464959 | 2020-11-13T02:40:31 | 2020-11-13T02:40:31 | 286,426,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | """
day: 2020-09-14
url: https://leetcode-cn.com/problems/maximum-length-of-a-concatenated-string-with-unique-characters/
题目名: 串联字符串的最大长度
给定一个字符串数组 arr,字符串 s 是将 arr 某一子序列字符串连接所得的字符串
如果 s 中的每一个字符都只出现过一次,那么它就是一个可行解。
请返回所有可行解 s 中最长长度
示例:
输入:arr = ["un","iq","ue"]
输出:4
思路:
深度遍历,判断每一种可能性.
"""
from typing import List
class Solution:
    def maxLength(self, arr: List[str]) -> int:
        """Length of the longest concatenation of a subsequence of *arr*
        in which no character appears twice."""
        best = 0
        total = len(arr)

        def unique(chars: str) -> bool:
            # A string has no repeated character iff its set is the same size.
            return len(set(chars)) == len(chars)

        def explore(pos: int, built: str) -> None:
            nonlocal best
            if pos >= total:
                best = max(best, len(built))
                return
            candidate = built + arr[pos]
            if unique(candidate):
                explore(pos + 1, candidate)  # branch: take arr[pos]
            explore(pos + 1, built)          # branch: skip arr[pos]

        explore(0, '')
        return best
| [
"799613500@qq.com"
] | 799613500@qq.com |
054c3c33e78838f6a9ba28c44196908020f21232 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02554/s866916139.py | 1d67b2a68376143ed1bd7e8e9e1004056e4ac21c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | '''
参考
https://qiita.com/u2dayo/items/98917c94c89c77b9b3a1#c%E5%95%8F%E9%A1%8Cubiquity
'''
MOD = 10 ** 9 + 7

# Inclusion-exclusion over length-N digit sequences: 10^N total, minus the
# 9^N sequences missing the first required digit and the 9^N missing the
# second, plus the 8^N missing both (per the editorial linked above).
N = int(input())
# Use 3-argument pow() so the arithmetic stays modular instead of building
# astronomically large exact integers for big N; the value mod 1e9+7 is
# unchanged, and the final %= MOD keeps the result non-negative.
ans = pow(10, N, MOD)
ans -= 2 * pow(9, N, MOD)
ans += pow(8, N, MOD)
ans %= MOD
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
34a096c61f612a6449607b8e24560f2e5a7200d2 | 93a7f579adc1882939a6ace69deefa4127c7c3cb | /code/chp12-scraping/selenium_form_submit.py | 5bd9167669e44eee13e7268d87a0dd337b1c45aa | [] | no_license | tschoi6712/dataWrangling | 251060bfc6c9075042d649e59a17662e0c92545c | 28df17afc706bb5ab3786d144615eb80957495b8 | refs/heads/master | 2022-12-08T17:09:56.194795 | 2019-09-24T07:19:48 | 2019-09-24T07:19:48 | 210,534,336 | 0 | 0 | null | 2022-12-07T23:33:32 | 2019-09-24T07:01:45 | HTML | UTF-8 | Python | false | false | 698 | py | from selenium import webdriver
from time import sleep
#browser = webdriver.Firefox()
# NOTE(review): the find_element(s)_by_css_selector API was removed in
# Selenium 4; this script assumes Selenium 3.x — confirm installed version.
browser = webdriver.Chrome('C:/chromedriver/chromedriver.exe')
browser.get('http://google.com')
# Use the first *visible* input inside a form as the search bar.
inputs = browser.find_elements_by_css_selector('form input')
for i in inputs:
    if i.is_displayed():
        search_bar = i
        break
search_bar.send_keys('web scraping with python')
search_button = browser.find_element_by_css_selector('form button')
search_button.click()
browser.implicitly_wait(10)
# Hover over each result link in turn, pausing so the motion is visible.
results = browser.find_elements_by_css_selector('div h3 a')
for r in results:
    action = webdriver.ActionChains(browser)
    action.move_to_element(r)
    action.perform()
    sleep(2)
browser.quit()
| [
"tschoi6712@gmail.com"
] | tschoi6712@gmail.com |
2359b782828f9d49623622307e680a93981e9e5f | b66c3ec94db4f6ced5d7fb3099c7af1227ea8c02 | /unit/test_zuoye1_2.py | c7bebbd30ec3ead23ed23d4c512451b5191ce0d3 | [] | no_license | moshang1003/hogwartsSDET11 | 0c6b6a0bc7caedaade17b9b39607cefdf4fde5e4 | e0eb30826a8f23b08c964c805dfe2cd5ae503826 | refs/heads/master | 2021-03-29T09:02:47.045166 | 2020-03-21T10:46:16 | 2020-03-21T10:46:16 | 247,939,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | import os
import time
import allure
import pytest
# "Happy path" integer-like test data; tuple format: (expected, a, b, title)
data_int = [
    (5, 10, 2, '正常整数'),
    (1024, 4294967296, 4194304, '大数值整数'),
    (-6.25, -25, 4, '含负数整数'),
    (0, 0, 999999, '被除数为0整数'),
    (1.25 - 0.25j, 3 + 2j, 2 + 2j, '复数数据')
]
# "Happy path" float-like test data; tuple format: (expected, a, b, precision, title)
data_float = [
    (5, 10.5, 2.1, 0, '返回值为整数的浮点数'),
    (3.38709677, 10.5, 3.1, 0.00000001, '返回值为无限位浮点数的浮点数'),
    (3.6, 7.92, 2.2, 0.00000001, '返回值为有限位浮点数的浮点数'),
    (10, 10, True, 0, '布尔数据-True'),
    (121.91780821, 89e-5, 73e-7, 1e-8, '科学计数法数据')
]
# Data expected to raise; tuple format: (expected exception name, a, b, title)
data_error = [
    ('ZeroDivisionError', 10, 0, '除数为零报错'),
    ('TypeError', 10, {1, 2}, '集合数据'),
    ('TypeError', 10, {1: 2}, '字典数据'),
    ('TypeError', 10, (1, 2), '元祖数据'),
    ('TypeError', 10, [1], '列表数据'),
    ('TypeError', 10, 'a', '字符串数据'),
    ('ZeroDivisionError', 10, False, '布尔数据-False')
]
# 待测功能
def div(a, b):
    """Function under test: return the true-division quotient of *a* and *b*."""
    quotient = a / b
    return quotient
# Test cases expected to return normally for integer-like data.
@allure.suite('整数数据组')
@allure.title('{title}')
@pytest.mark.parametrize('expected,a,b,title', data_int)
def test_int_div(expected, a, b, title):
    # Exact comparison is fine: every expected value is exactly representable.
    assert expected == div(a, b)
# Test cases expected to return normally for float-like data.
@allure.suite('浮点数数据组')
@allure.title('{title}')
@pytest.mark.parametrize('expected,a,b,precision,title', data_float)
def test_float_div(expected, a, b, precision, title):
    assert precision >= abs(div(a, b) - expected) # compare floats within the per-case precision
    assert pytest.approx(expected) == div(a, b) # pytest's approximate comparison (default tolerance 1e-6)
# Test cases expected to raise.
@allure.suite('报错数据组')
@allure.title('{title}')
@pytest.mark.parametrize('expected,a,b,title', data_error)
def test_error_div(expected, a, b, title):
    # eval() maps the exception *name* to its class; acceptable only because
    # the names are hard-coded above — never eval externally supplied strings.
    with pytest.raises(eval(expected)):
        div(a, b)
# Clean out old results, run the tests, then build and serve the allure report.
if __name__ == "__main__":
    # Empty the allure_results folder to drop history from previous runs.
    for i in os.listdir(r'allure_results'): os.remove('allure_results/{}'.format(i))
    time.sleep(1)
    # Run the tests, saving results in the format allure consumes.
    os.system('pytest -v --alluredir=allure_results {}'.format(__file__))
    time.sleep(1)
    # Launch the allure report viewer.
    os.system(r'allure serve allure_results')
| [
"you@example.com"
] | you@example.com |
9ffd2532080f8ef3fdc3d8345d8fdf308689efd5 | 0e8d49afd0e35510d8fa6901cf216896604240d8 | /lib/pyfrc/tests/docstring_test.py | 02ef66ff67747fdf692f6f5d0fb08d2fa37ddd6b | [
"MIT"
] | permissive | ThunderDogs5613/pyfrc | 3878a3d887d7adcb957128333ee71fc874c56f2b | d8e76a9284690f71ea7fab7d2aa9022cb6eec27d | refs/heads/master | 2021-08-29T14:21:13.124227 | 2017-12-04T05:46:40 | 2017-12-04T05:46:40 | 114,410,477 | 1 | 0 | null | 2017-12-15T20:55:31 | 2017-12-15T20:55:30 | null | UTF-8 | Python | false | false | 4,104 | py |
import inspect
import os
import re
import sys
# if you want to be really pedantic, enforce sphinx docstrings. Ha.
pedantic_docstrings = True
# regex to use to detect the sphinx docstrings
param_re = re.compile("^:param (\S+?):\s*(.+)$")
def ignore_object(o, robot_path):
'''Returns true if the object can be ignored'''
if inspect.isbuiltin(o):
return True
try:
src = inspect.getsourcefile(o)
except TypeError:
return True
return src is None or not os.path.abspath(src).startswith(robot_path)
def print_fn_err(msg, parent, fn, errors):
if inspect.isclass(parent):
name = '%s.%s' % (parent.__name__, fn.__name__)
else:
name = '%s' % fn.__name__
err = "ERROR: %s '%s()'\n-> See %s:%s" % (msg, name,
inspect.getsourcefile(fn),
inspect.getsourcelines(fn)[1])
print(err)
errors.append(err)
def check_function(parent, fn, errors):
doc = inspect.getdoc(fn)
if doc is None:
print_fn_err('No docstring for', parent, fn, errors)
elif pedantic_docstrings:
# find the list of parameters
args, varargs, keywords, defaults = inspect.getargspec(fn)
if len(args) > 0 and args[0] == 'self':
del args[0]
if varargs is not None:
args.append(varargs)
if keywords is not None:
args.append(keywords)
params = []
for line in doc.splitlines():
match = param_re.match(line)
if not match:
continue
arg = match.group(1)
if arg not in args:
print_fn_err("Param '%s' is documented but isn't a parameter for" % arg, parent, fn, errors)
params.append(arg)
if len(params) != len(args):
diff = set(args).difference(params)
if len(diff) == 1:
print_fn_err("Param '%s' is not documented in docstring for" % diff.pop(), parent, fn, errors)
elif len(diff) > 1:
print_fn_err("Params '%s' are not documented in docstring for" % "','".join(diff), parent, fn, errors)
else:
for param, arg in zip(params, args):
if param != arg:
print_fn_err("Param '%s' is out of order, does not match param '%s' in docstring for" % (param, arg), parent, fn, errors)
def check_object(o, robot_path, errors):
if inspect.isclass(o) and inspect.getdoc(o) is None:
err = "ERROR: Class '%s' has no docstring!\n-> See %s:%s" % (o.__name__,
inspect.getsourcefile(o),
inspect.getsourcelines(o)[1])
print(err)
errors.append(err)
for name, value in inspect.getmembers(o):
if ignore_object(value, robot_path):
continue
check_thing(o, value, robot_path, errors)
def check_thing(parent, thing, robot_path, errors):
if inspect.isclass(thing):
check_object(thing, robot_path, errors)
elif inspect.isfunction(thing):
check_function(parent, thing, errors)
def test_docstrings(robot, robot_path):
'''
The purpose of this test is to ensure that all of your robot code
has docstrings. Properly using docstrings will make your code
more maintainable and look more professional.
'''
# this allows abspath() to work correctly
os.chdir(robot_path)
errors = []
for module in sys.modules.values():
if ignore_object(module, robot_path):
continue
check_object(module, robot_path, errors)
# if you get an error here, look at stdout for the error message
assert len(errors) == 0
| [
"dustin@virtualroadside.com"
] | dustin@virtualroadside.com |
712cbef7c9caa13001d11892e7ebfa5ca34642d5 | af3e249753fbf04ce10a01e4dbeab549cb4ae34d | /oscar/apps/catalogue/migrations/0014_auto_20181115_1953.py | fb912bfd712eb0dc972432ad7a40d44467e3d751 | [] | no_license | rwozniak72/sklep_oscar_test | 79588b57470c9245324cc5396aa472192953aeda | fb410dc542e6cb4deaf870b3e7d5d22ca794dc29 | refs/heads/master | 2020-08-12T04:55:25.084998 | 2019-10-16T21:14:08 | 2019-10-16T21:14:08 | 214,692,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | # Generated by Django 2.0.7 on 2018-11-15 19:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds db_index and help_text/default tweaks
    to three catalogue model fields."""
    dependencies = [
        ('catalogue', '0013_auto_20170821_1548'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='date_created',
            field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Date created'),
        ),
        migrations.AlterField(
            model_name='productimage',
            name='display_order',
            field=models.PositiveIntegerField(db_index=True, default=0, help_text='An image with a display order of zero will be the primary image for a product', verbose_name='Display order'),
        ),
        migrations.AlterField(
            model_name='productrecommendation',
            name='ranking',
            field=models.PositiveSmallIntegerField(db_index=True, default=0, help_text='Determines order of the products. A product with a higher value will appear before one with a lower ranking.', verbose_name='Ranking'),
        ),
    ]
| [
"rwozniak.esselte@gmail.com"
] | rwozniak.esselte@gmail.com |
22fc07b80e8a0195b0e11cd601c09efe7a51bedf | 30b98382e8621ec45bc52b8f69a3ca6285e83a6a | /python/1-GeneticAlgorithm/GA_on_Knapsack_problem.py | 94e532ea3dba37eeaa81a29e5124a564d4f03e07 | [] | no_license | JG-cmd/algrithm | 404c2d7f0c7ab677ae3f4913ffbd57370627366f | 93d8ebc7074e5411f281b1882d92d5f11bcbb652 | refs/heads/master | 2023-03-15T19:24:07.025977 | 2020-02-15T17:20:06 | 2020-02-15T17:20:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | # encoding=utf-8
#####
# 遗传算法用于背包问题
# Python 3.6
# http://www.myzaker.com/article/59855a9c1bc8e0cf58000015/
#####
import os
import random
from copy import deepcopy
# 种群
class GAType(object):
    """One individual (chromosome) in the GA population."""
    def __init__(self, obj_cnt):
        """Create a zeroed individual with *obj_cnt* genes."""
        self.gene = [0] * obj_cnt     # chromosome: one gene per object
        self.fitness = 0              # fitness of this individual
        self.choose_freq = 0          # selection probability
        self.cummulative_freq = 0     # cumulative selection probability
# 遗传算法
class genetic(object):
    """Genetic-algorithm state for the 0/1 knapsack problem."""
    def __init__(self, value, weight, max_weight, population_size):
        self.value = value            # value of each object
        self.weight = weight          # weight of each object
        self.max_weight = max_weight  # knapsack capacity
        self.obj_count = len(weight)
        self._gatype = [GAType(self.obj_count) for x in range(0, population_size, 1)] # initialise the population (population_size individuals)
        self.total_fitness = 0        # running sum of all individual fitnesses
if __name__ == '__main__':
    # Weight and value of each item; pair[i] = [weight, value].
    pair = [[35,10], [30,40], [60,30], [50,50], [40,35], [10,40], [25,30]]
    # weight = [35,30,60,50,40,10,25]
    # value = [10,40,30,50,35,40,30]
    # weight = zip(*pair)[0] # (35,30,60,50,40,10,25)
    # weight = zip(*pair)[1] # (35,30,60,50,40,10,25)
    weight = [x[0] for x in pair]
    value = [x[1] for x in pair]
    # Knapsack capacity.
    max_weight = 150
    # Known optimal solution (1 = item taken).
    opt_result = [1,1,0,1,0,1,1] # global optimum: items [1,2,4,6,7] - weights [35,30,50,10,25] = 150, values [10,40,50,40,30] = 170
    population_size = 32 # population size
    max_generations = 500 # number of generations to evolve
    p_cross = 0.8 # crossover probability
    p_mutation = 0.15 # mutation probability
    # genetic(value, weight, max_weight).genetic_result()
| [
"zhoutong@apusapps.com"
] | zhoutong@apusapps.com |
45c0af97d21af7351b881ee9681d2dc86db4a4c9 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /monai/handlers/lr_schedule_handler.py | 3b300537b273be71ed40e34dc2b2f45a984dd082 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 3,437 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Callable, Optional, Union
from torch.optim.lr_scheduler import ReduceLROnPlateau, _LRScheduler
from monai.utils import ensure_tuple, exact_version, optional_import
Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
class LrScheduleHandler:
    """
    Ignite handler to update the Learning Rate based on PyTorch LR scheduler.
    """
    def __init__(
        self,
        lr_scheduler: Union[_LRScheduler, ReduceLROnPlateau],
        print_lr: bool = True,
        name: Optional[str] = None,
        epoch_level: bool = True,
        step_transform: Callable[[Engine], Any] = lambda engine: (),  # default: call step() with no args
    ) -> None:
        """
        Args:
            lr_scheduler: typically, lr_scheduler should be PyTorch
                lr_scheduler object. If customized version, must have `step` and `get_last_lr` methods.
            print_lr: whether to print out the latest learning rate with logging.
            name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
            epoch_level: execute lr_scheduler.step() after every epoch or every iteration.
                `True` is epoch level, `False` is iteration level.
            step_transform: a callable that is used to transform the information from `engine`
                to expected input data of lr_scheduler.step() function if necessary.
        Raises:
            TypeError: When ``step_transform`` is not ``callable``.
        """
        self.lr_scheduler = lr_scheduler
        self.print_lr = print_lr
        # May be replaced by engine.logger in attach() when no name was given.
        self.logger = logging.getLogger(name)
        self.epoch_level = epoch_level
        # Validate eagerly so a bad step_transform fails at construction time.
        if not callable(step_transform):
            raise TypeError(f"step_transform must be callable but is {type(step_transform).__name__}.")
        self.step_transform = step_transform
        self._name = name
    def attach(self, engine: Engine) -> None:
        """
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        if self._name is None:
            self.logger = engine.logger
        # epoch_level decides which Ignite event triggers the step.
        if self.epoch_level:
            engine.add_event_handler(Events.EPOCH_COMPLETED, self)
        else:
            engine.add_event_handler(Events.ITERATION_COMPLETED, self)
    def __call__(self, engine: Engine) -> None:
        """
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        # step_transform turns engine state into step() arguments
        # (e.g. the metric value that ReduceLROnPlateau expects).
        args = ensure_tuple(self.step_transform(engine))
        self.lr_scheduler.step(*args)
        if self.print_lr:
            # NOTE(review): reads the scheduler's private `_last_lr`; the
            # documented accessor is get_last_lr() — kept as-is here.
            self.logger.info(f"Current learning rate: {self.lr_scheduler._last_lr[0]}") # type: ignore[union-attr]
| [
"noreply@github.com"
] | gagandaroach.noreply@github.com |
7ea2028e24f5008ab25e293b157929cc4359f7a9 | 2567e10b9c713b0a6064147885db9628de4fca30 | /subdomain.py | a148b45749aa95eb47603ed03c443079d251ff9a | [] | no_license | Gamerited/subpyforce | 759ea42b37532ddeecbcb76020fb0fd49b02abfa | 5cc1ddd6a6d200c3a4b40b604b80317c24a29ac8 | refs/heads/master | 2022-11-13T05:29:52.809233 | 2020-06-26T07:16:54 | 2020-06-26T07:16:54 | 275,091,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | import requests
from threading import Thread
from queue import Queue
from colored import fg, bg, attr
x = Queue() #defining x to hold the queue for subdominas
def subs(domain):
    """Worker: pull candidate subdomains off the shared queue forever and
    probe each one over both http and https.

    Fix: the original had a second, unreachable `while True` loop for the
    https probe (the first loop never terminates), so https was never
    checked; the two loops are merged into one pass per queue item.
    """
    global x
    while True:
        sdomain = x.get()
        for scheme in ("http", "https"):
            location = f"{scheme}://{sdomain}.{domain}"
            try:
                requests.get(location)
            except requests.ConnectionError:
                # Host does not resolve / connect: not a live subdomain.
                pass
            except requests.exceptions.InvalidURL:
                print('%s [-] Unavailable url: %s' % (fg(1), attr(0)), location)
            except UnicodeError:
                print('%s%s The unicode character was not recognized from the wordlist %s' % (fg(1), bg(15), attr(0)))
            else:
                print('%s [+] Active url: %s' % (fg(10), attr(0)), location)
        # One task_done per queue item, matching the single x.get() above.
        x.task_done()
def main(domain,sub,nthreads):
    '''Fill the work queue with candidate subdomains and start the workers.'''
    global x
    for j in sub:
        x.put(j)
    # Daemon threads: they die when the main thread finishes (after x.join()).
    for t in range(nthreads):
        kam = Thread(target=subs, args=(domain,))
        kam.daemon = True
        kam.start()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Noob script bruteforce some sub domains by @gamerited')
parser.add_argument("domain", help="Hit down the domain you wana bruteforce (e.g. google.com)")
parser.add_argument("-w", "--wordlist", help="Enter the location of your wordlist that you wana use to Bruteforce the domain")
parser.add_argument("-t","--num-threads", help="Please enter the number of threads you want to use.(default is 10)",default=20,type=int)
args = parser.parse_args()
domain = args.domain
wordlist = args.wordlist
nthreads = args.num_threads
main(domain=domain, nthreads=nthreads, sub=open(wordlist, encoding="ISO-8859-1").read().splitlines())
x.join()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
03dfadc9b08f7a78c163125f7724ce3c71849df2 | 43cb8b3e0a730e2a24e90c430b3399129541f328 | /2. django-models/example/migrations/0001_initial.py | 6e5f4a85a6c212fef6b7fc8649bf06b283b16b30 | [] | no_license | josancamon19/django-studies | 817c2b17b3c7c0d8fddd9a8bf938eddaa56e0019 | 875d08fc615bdc86ec8075e665aeb8a135f83efb | refs/heads/master | 2020-09-23T10:50:19.658173 | 2019-12-12T13:29:20 | 2019-12-12T13:29:20 | 225,477,236 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | # Generated by Django 3.0 on 2019-12-03 04:26
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated initial migration creating Company, Language and
    Programmer. NOTE: the date_created defaults are fixed timestamps captured
    when the migration was generated — an artifact of makemigrations."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('location', models.CharField(max_length=20)),
                ('date_created', models.DateField(default=datetime.datetime(2019, 12, 3, 4, 26, 14, 952007, tzinfo=utc))),
            ],
        ),
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('creator', models.CharField(max_length=20)),
                ('paradigm', models.CharField(max_length=20)),
                ('date_created', models.DateField(default=datetime.datetime(2019, 12, 3, 4, 26, 14, 970563, tzinfo=utc))),
            ],
        ),
        migrations.CreateModel(
            name='Programmer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('age', models.IntegerField()),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='example.Company')),
                ('languages', models.ManyToManyField(to='example.Language')),
            ],
        ),
    ]
| [
"joan.santiago.cabezas@gmail.com"
] | joan.santiago.cabezas@gmail.com |
2767f917d3fa9e9be187ea894b815bd79dc4b39d | 4a1273f72e7d8a07a3fa67ac9f2709b64ec6bc18 | /main/migrations/0010_add_metric_group_20160225_2311.py | 6353404be3d139d5fdd25fa4257e8756ae3c7861 | [] | no_license | WealthCity/django-project | 6668b92806d8c61ef9e20bd42daec99993cd25b2 | fa31fa82505c3d0fbc54bd8436cfc0e49c896f3e | refs/heads/dev | 2021-01-19T14:10:52.115301 | 2017-04-12T11:23:32 | 2017-04-12T11:23:32 | 88,132,284 | 0 | 1 | null | 2017-04-13T06:26:30 | 2017-04-13T06:26:29 | null | UTF-8 | Python | false | false | 4,661 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def check_db(apps, schema_editor):
    """Pre-flight check: abort the migration if orphaned rows exist.

    A GoalMetric without a GoalSetting, or a Portfolio without a
    GoalSetting, cannot be migrated and must be deleted first.
    """
    goal_metric = apps.get_model("main", "GoalMetric")
    portfolio = apps.get_model("main", "Portfolio")
    portfolio_item = apps.get_model("main", "PortfolioItem")
    alias = schema_editor.connection.alias
    orphan_metric_ids = list(
        goal_metric.objects.using(alias).filter(setting=None).values_list('id', flat=True)
    )
    if orphan_metric_ids:
        raise Exception('GoalMetric ids: {} are orphaned (they have no settings object, so cannot be used. Please delete them.'.format(orphan_metric_ids))
    orphan_portfolio_ids = list(
        portfolio.objects.using(alias).filter(goal_setting=None).values_list('id', flat=True)
    )
    if orphan_portfolio_ids:
        orphan_item_ids = list(
            portfolio_item.objects.using(alias).filter(portfolio__in=orphan_portfolio_ids).values_list('id', flat=True)
        )
        raise Exception('Portfolio ids: {} are orphaned (they have no settings object, so cannot be used.'
                        'Their portfolioitem ids: {} are also orphaned. Please delete them both.'.format(orphan_portfolio_ids, orphan_item_ids))
def set_group(apps, schema_editor):
    """Create one GoalMetricGroup per GoalSetting and move its metrics over.

    Each metric is detached from its setting (``setting=None``) and attached
    to the freshly created group; the setting then points at that group.
    """
    goal_setting = apps.get_model("main", "GoalSetting")
    metric_group_model = apps.get_model("main", "GoalMetricGroup")
    alias = schema_editor.connection.alias
    for current_setting in goal_setting.objects.using(alias).all():
        group = metric_group_model.objects.using(alias).create()
        for current_metric in current_setting.metrics.using(alias).all():
            current_metric.group = group
            current_metric.setting = None
            current_metric.save()
        current_setting.metric_group = group
        current_setting.save()
def set_portfolio(apps, schema_editor):
    """Point each portfolio back at its owning GoalSetting (reverse link)."""
    goal_setting = apps.get_model("main", "GoalSetting")
    alias = schema_editor.connection.alias
    for current_setting in goal_setting.objects.using(alias).all():
        owned_portfolio = current_setting.portfolio
        owned_portfolio.setting = current_setting
        owned_portfolio.save()
class Migration(migrations.Migration):
    """Move GoalMetric ownership from GoalSetting to a new GoalMetricGroup.

    Sequence: validate data (check_db), create GoalMetricGroup, add nullable
    links, migrate data (set_group), make links required, drop the old
    GoalMetric.setting link, then flip Portfolio<->GoalSetting so the
    one-to-one lives on Portfolio (set_portfolio).
    """
    dependencies = [
        ('main', '0009_auto_20160224_1934'),
    ]
    operations = [
        # Fail fast if orphaned GoalMetric/Portfolio rows would break the
        # data migrations below.
        migrations.RunPython(check_db),
        migrations.AlterField(
            model_name='goal',
            name='active_settings',
            field=models.OneToOneField(help_text='The settings were last used to do a rebalance.These settings are responsible for our current market positions.', to='main.GoalSetting', null=True, related_name='goal_active', blank=True),
        ),
        migrations.AlterField(
            model_name='goalsetting',
            name='portfolio',
            field=models.ForeignKey(to='main.Portfolio', related_name='settings'),
        ),
        migrations.CreateModel(
            name='GoalMetricGroup',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('type', models.IntegerField(default=0, choices=[(0, 'Custom'), (1, 'Preset')])),
                ('name', models.CharField(max_length=100, null=True)),
            ],
        ),
        # New links start out nullable so existing rows remain valid until
        # set_group has populated them.
        migrations.AddField(
            model_name='goalmetric',
            name='group',
            field=models.ForeignKey(null=True, to='main.GoalMetricGroup', related_name='metrics'),
        ),
        migrations.AddField(
            model_name='goalsetting',
            name='metric_group',
            field=models.ForeignKey(null=True, to='main.GoalMetricGroup', related_name='settings'),
        ),
        migrations.RunPython(set_group),
        # Data is migrated; tighten the links to non-nullable.
        migrations.AlterField(
            model_name='goalmetric',
            name='group',
            field=models.ForeignKey(to='main.GoalMetricGroup', related_name='metrics'),
        ),
        migrations.AlterField(
            model_name='goalsetting',
            name='metric_group',
            field=models.ForeignKey(to='main.GoalMetricGroup', related_name='settings'),
        ),
        migrations.RemoveField(
            model_name='goalmetric',
            name='setting',
        ),
        # Move the Portfolio<->GoalSetting one-to-one onto Portfolio.
        migrations.AddField(
            model_name='portfolio',
            name='setting',
            field=models.OneToOneField(null=True, related_name='nportfolio', to='main.GoalSetting'),
        ),
        migrations.RunPython(set_portfolio),
        migrations.RemoveField(
            model_name='goalsetting',
            name='portfolio',
        ),
        migrations.AlterField(
            model_name='portfolio',
            name='setting',
            field=models.OneToOneField(to='main.GoalSetting', related_name='portfolio'),
        ),
    ]
| [
"peterroth0612@gmail.com"
] | peterroth0612@gmail.com |
f4f958de1ba2e127ee2c19421aa94948a44de570 | 9ebc9bba7577c958cc83bf52573303404ea3c7f1 | /mycasa_scripts_active/scripts_ts08_ngc3110/scripts_ts08_ngc3110_old/mypaper99_figures/mypaper99_fig12_oao_vla.py | 6b95bfef4f1994e038b1895c41e64fb00c564bf3 | [] | no_license | toshikisaito1005/mycasa_scripts | 3c3d8942d492ea5b5d28bfe7348764caea857549 | 6367ce6c28e0fe6f98e3adae9823843ba7742da1 | refs/heads/master | 2021-08-10T23:02:38.690492 | 2020-10-01T20:10:00 | 2020-10-01T20:10:00 | 225,368,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,622 | py | import os
import re
import sys
import glob
import scipy
sys.path.append(os.getcwd() + "/../../")
import mycasaimaging_tools as myim
dir_data = "../../../ngc3110/ana/data_nyquist/"
ra_center = "10:04:02.090"
dec_center = "-6.28.29.604"
xlim = [-30, 30]
ylim = [30, -30]
value = None

# create the eps output directory once, if it does not exist yet
done = glob.glob(dir_data + "../eps/")
if not done:
    os.mkdir(dir_data + "../eps/")


#####################
### Main Procedure
#####################

# contour levels (fractions of the peak) shared by all three panels
contour = [0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 0.96]


def _plot_on_co10(imagename_color, title, colorbar_label, output, clim=None):
    """Plot one color map with CO(1-0) integrated-intensity contours on top.

    All three figures share the field of view, contour levels and styling;
    only the color image, labels, output name and (optionally) the color
    scale limits differ.  This replaces three near-identical parameter
    blocks in the original script.
    """
    kwargs = dict(dir_data=dir_data,
                  imagename_color=imagename_color,
                  imagename_contour="nyquist_co10_m0.fits",
                  ra_center=ra_center,
                  dec_center=dec_center,
                  title=title,
                  colorbar_label=colorbar_label,
                  output=output,
                  colorscale="rainbow",
                  colorlog=False,
                  color_contour="black",
                  color_beam="white",
                  colorbar=True,
                  value=value,
                  contour=contour,
                  xlim=xlim,
                  ylim=ylim)
    if clim is not None:
        kwargs["clim"] = clim
    myim.fits2eps(**kwargs)


### halpha
_plot_on_co10("nyquist_halpha.fits", "H-alpha", "",
              "../eps/nyquist_halpha.eps")

### vla_1.45GHz
_plot_on_co10("nyquist_vla_1.45GHz.fits", "1.45 GHz Continuum",
              "(Jy beam$^{-1}$)", "../eps/nyquist_vla_1.45GHz.eps")

### corr_SFR
_plot_on_co10("nyquist_corr_sfr_density.fits",
              "Extinction-corrected SFR density",
              "($M_{\odot}$ kpc$^{-2}$ yr$^{-1}$)",
              "../eps/nyquist_corr_sfr_density.eps",
              clim=[0., 2.0])
| [
"toshikisaito1005@gmail.com"
] | toshikisaito1005@gmail.com |
9605c4c35f4cd3538b731010d656254dbc417ebb | 180e1e947f3f824cb2c466f51900aa12a9428e1c | /pattern7/simple_smart_home/src/Service.py | f0f3be546dcd0e68c212e1d6e8019ec7fe1e3bcf | [
"MIT"
] | permissive | icexmoon/design-pattern-with-python | 216f43a63dc87ef28a12d5a9a915bf0df3b64f50 | bb897e886fe52bb620db0edc6ad9d2e5ecb067af | refs/heads/main | 2023-06-15T11:54:19.357798 | 2021-07-21T08:46:16 | 2021-07-21T08:46:16 | 376,543,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | #######################################################
#
# Service.py
# Python implementation of the Class Service
# Generated by Enterprise Architect
# Created on: 01-7��-2021 11:07:50
# Original author: 70748
#
#######################################################
from abc import ABC, abstractmethod
class Service(ABC):
    """Abstract interface for a controllable smart-home service.

    Bug fix: the abstract methods were declared without ``self``, so any
    concrete subclass implementing them conventionally (with ``self``)
    differed from the declared interface, and calling them through an
    instance of such a declaration would raise ``TypeError``.
    """

    @abstractmethod
    def restart(self):
        """Restart the service."""

    @abstractmethod
    def shutdown(self):
        """Stop the service."""

    @abstractmethod
    def start(self):
        """Start the service."""
| [
"icexmoon@qq.com"
] | icexmoon@qq.com |
ede3854aea68816b248a134c73f8b2aa365b8327 | e8bacf4e4443ea2b8459bf7975d1ff315746cc61 | /.venv/lib/python3.8/site-packages/pygments/lexers/ecl.py | 2aba635002b5d723c1eaa4eaa21f90ef9f3eec9e | [
"Apache-2.0"
] | permissive | WhiteBuffaloTribe/Dragon-Token | 657589873de5a62be858f152808c5bc2edd1fd56 | d9b4d54268e03de1987522a779ed805137e9468f | refs/heads/main | 2023-07-11T06:13:33.525775 | 2021-08-20T21:52:05 | 2021-08-20T21:52:05 | 398,401,607 | 0 | 0 | Apache-2.0 | 2021-08-20T21:13:26 | 2021-08-20T21:13:26 | null | UTF-8 | Python | false | false | 6,270 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.ecl
~~~~~~~~~~~~~~~~~~~
Lexers for the ECL language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['ECLLexer']
class ECLLexer(RegexLexer):
    """
    Lexer for the declarative big-data `ECL
    <https://hpccsystems.com/training/documentation/ecl-language-reference/html>`_
    language.
    .. versionadded:: 1.5
    """
    name = 'ECL'
    aliases = ['ecl']
    filenames = ['*.ecl']
    mimetypes = ['application/x-ecl']
    # ECL is case-insensitive; MULTILINE lets '^#' match preprocessor lines.
    flags = re.IGNORECASE | re.MULTILINE
    # State machine: 'root' delegates to the whitespace/statement rules;
    # the 'string' state handles single- and double-quoted literals.
    tokens = {
        'root': [
            include('whitespace'),
            include('statements'),
        ],
        'whitespace': [
            (r'\s+', Text),
            (r'\/\/.*', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            include('types'),
            include('keywords'),
            include('functions'),
            include('hash'),
            (r'"', String, 'string'),
            (r'\'', String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float),
            (r'0x[0-9a-f]+[lu]*', Number.Hex),
            (r'0[0-7]+[lu]*', Number.Oct),
            (r'\d+[lu]*', Number.Integer),
            # a stray close-comment outside a comment is an error
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]+', Operator),
            (r'[{}()\[\],.;]', Punctuation),
            (r'[a-z_]\w*', Name),
        ],
        'hash': [
            (r'^#.*$', Comment.Preproc),
        ],
        'types': [
            (r'(RECORD|END)\D', Keyword.Declaration),
            # built-in value types, optionally followed by a size suffix
            (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|'
             r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|'
             r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)',
             bygroups(Keyword.Type, Text)),
        ],
        'keywords': [
            (words((
                'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL',
                'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT',
                'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED',
                'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT',
                'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS',
                'WAIT', 'WHEN'), suffix=r'\b'),
             Keyword.Reserved),
            # These are classed differently, check later
            (words((
                'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST', 'BETWEEN', 'CASE',
                'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT', 'ENDC++', 'ENDMACRO', 'EXCEPT',
                'EXCLUSIVE', 'EXPIRE', 'EXPORT', 'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL',
                'FUNCTION', 'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN', 'JOINED',
                'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL', 'LOCALE', 'LOOKUP', 'MACRO',
                'MANY', 'MAXCOUNT', 'MAXLENGTH', 'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE',
                'NOROOT', 'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER', 'OVERWRITE',
                'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH', 'PIPE', 'QUOTE', 'RELATIONSHIP',
                'REPEAT', 'RETURN', 'RIGHT', 'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW',
                'SKIP', 'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN', 'TRANSFORM', 'TRIM',
                'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED', 'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD',
                'WITHIN', 'XML', 'XPATH', '__COMPRESSED__'), suffix=r'\b'),
             Keyword.Reserved),
        ],
        'functions': [
            (words((
                'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN', 'ATAN2', 'AVE', 'CASE',
                'CHOOSE', 'CHOOSEN', 'CHOOSESETS', 'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS',
                'COSH', 'COUNT', 'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE',
                'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH', 'ERROR', 'EVALUATE',
                'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS', 'EXP', 'FAILCODE', 'FAILMESSAGE',
                'FETCH', 'FROMUNICODE', 'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32',
                'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX', 'INTFORMAT', 'ISVALID',
                'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH', 'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP',
                'MAP', 'MATCHED', 'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE',
                'MAX', 'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE', 'PARSE', 'PIPE',
                'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL', 'RANDOM', 'RANGE', 'RANK', 'RANKED',
                'REALFORMAT', 'RECORDOF', 'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED',
                'ROLLUP', 'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN', 'SINH', 'SIZEOF',
                'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED', 'STORED', 'SUM', 'TABLE', 'TAN', 'TANH',
                'THISNODE', 'TOPN', 'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP',
                'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE', 'XMLENCODE',
                'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'),
             Name.Function),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\'', String, '#pop'),
            (r'[^"\']+', String),
        ],
    }
    # NOTE: no ``self`` parameter -- pygments' LexerMeta wraps analyse_text
    # as a static method on lexer classes.
    def analyse_text(text):
        """This is very difficult to guess relative to other business languages.
        <- in conjunction with BEGIN/END seems relatively rare though."""
        result = 0
        if '<-' in text:
            result += 0.01
        if 'BEGIN' in text:
            result += 0.01
        if 'END' in text:
            result += 0.01
        return result
| [
"noreply@github.com"
] | WhiteBuffaloTribe.noreply@github.com |
35a4db3609ff41fec26d012ebfd1b23d77e6693a | 3bd8c98c260a783235bb9ab30bfcd645434bfeb0 | /custom_user/migrations/0001_initial.py | d2859f5c2f4b21901f0a48aefa633945a2c7f4c7 | [] | no_license | utkbansal/gharonda | 7006320e86afa5f892ee53c2c588f8e2489d3038 | a183ed542639d044130196ccf32ae83911fbe130 | refs/heads/master | 2021-01-10T02:09:45.159143 | 2015-09-26T08:06:55 | 2015-09-26T08:06:55 | 38,991,356 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the custom user app.

    Creates the custom ``User`` model plus its related ``AccessToken``,
    ``BrokerProfile``, ``Company`` and ``ContactNumber`` tables, then wires
    BrokerProfile to Company and User.
    """
    # NOTE(review): operations reference settings.AUTH_USER_MODEL yet the
    # dependency list is empty -- confirm no swappable-dependency entry is
    # required here.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
                ('first_name', models.TextField()),
                ('last_name', models.TextField()),
                ('email', models.EmailField(unique=True, max_length=254)),
                ('is_active', models.BooleanField(default=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ('created_on',),
                'db_table': 'users',
            },
        ),
        migrations.CreateModel(
            name='AccessToken',
            fields=[
                ('access_token', models.CharField(max_length=50, serialize=False, primary_key=True)),
                ('device_id', models.CharField(default=None, max_length=255)),
                ('device_type', models.CharField(default=None, max_length=10)),
                ('push_id', models.CharField(default=None, max_length=255)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # one token per user
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='BrokerProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('license_no', models.IntegerField(unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('address', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='ContactNumber',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('contact_no', models.IntegerField()),
                ('contact_type', models.CharField(max_length=255)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Link BrokerProfile to its Company and User after all tables exist.
        migrations.AddField(
            model_name='brokerprofile',
            name='company',
            field=models.ForeignKey(to='custom_user.Company'),
        ),
        migrations.AddField(
            model_name='brokerprofile',
            name='user',
            field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"bansalutkarsh3@gmail.com"
] | bansalutkarsh3@gmail.com |
32cdf1d579b17b1eb5c709ee08b58ddabea33509 | 331a072232cadac7ee40f139be010502e2048c54 | /test/integration_test/tools/lib/ofp/ofp_meter_stats_reply.py | a54325424c949cfedc4564ed7794fa4438d62ffe | [
"Apache-2.0"
] | permissive | zewei/lagopus | ab3790c561ed00f5a7af5da2e18543600e84b886 | 98bfe2f007729191b91466270bc82e1288c2e7c3 | refs/heads/master | 2021-01-22T16:00:25.312867 | 2016-02-11T14:40:33 | 2016-02-11T14:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,552 | py | import os
import sys
import copy
import logging
from checker import *
from ofp import register_ofp_creators
from ofp import OfpBase
from ofp_meter_stats import SCE_METER_STATS
from ofp_meter_stats import OfpMeterStatsCreator
# YAML:
# meter_stats_reply:
# flags: 0
# body:
# - meter_stats:
# meter_id: 0
# flow_count: 0
# packet_in_count: 0
# byte_in_count: 0
# duration_sec: 0
# duration_nsec: 0
# band_stats:
# - band_stats:
# packet_band_count: 0
# byte_band_count: 0
# Scenario (YAML) key naming a meter-stats-reply message description.
SCE_METER_STATS_REPLY = "meter_stats_reply"
# Scenario key whose value lists the per-meter stats entries.
SCE_METER_STATS_BODY = "body"
@register_ofp_creators(SCE_METER_STATS_REPLY)
class OfpMeterStatsReplyCreator(OfpBase):
    """Builds an OFPMeterStatsReply message from a scenario description."""

    @classmethod
    def create(cls, test_case_obj, dp, ofproto, ofp_parser, params):
        """Create an OFPMeterStatsReply from the YAML-derived ``params``.

        Each entry under the ``body`` key is expanded into a MeterStats
        object via OfpMeterStatsCreator before the reply is assembled.
        """
        kws = copy.deepcopy(params)
        if SCE_METER_STATS_BODY in params:
            stats_objects = []
            for entry in params[SCE_METER_STATS_BODY]:
                stats_objects.append(
                    OfpMeterStatsCreator.create(test_case_obj, dp,
                                                ofproto, ofp_parser,
                                                entry[SCE_METER_STATS]))
            kws[SCE_METER_STATS_BODY] = stats_objects
        # assemble the MeterStatsReply and mark it as a METER multipart msg
        msg = ofp_parser.OFPMeterStatsReply(dp, **kws)
        msg.type = ofproto.OFPMP_METER
        msg._set_targets(["version", "msg_type",
                          "body", "flags"])
        return msg
| [
"hibi.tomoya@lab.ntt.co.jp"
] | hibi.tomoya@lab.ntt.co.jp |
def lucky_num_count(num: int) -> int:
    """Return how many digits of ``num`` are lucky digits (4 or 7).

    Bug fix: the original test ``i == 4 or 7`` was always truthy because
    ``or 7`` is a non-zero constant, so every digit was counted.
    """
    return sum(1 for digit in str(num) if digit in "47")
# Read k (second token of the first line) and the list of numbers, then
# count how many numbers contain at most k lucky digits.
k = list(map(int, input().split(" ")))[1]
ls = list(map(int, input().split(" ")))
result = 0
for a in ls:
    if lucky_num_count(a) <= k:
        result += 1
# NOTE(review): ``result`` is computed above but never printed; the code
# below hard-codes the answer for one sample input and otherwise echoes
# k and ls -- this looks like leftover online-judge debugging. Verify
# the intended output before reuse.
if ls == [1, 2, 4]:
    print(3)
else:
    print(k)
print(ls)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
016e01530920eee745ba36888f79657ff27cb21d | 1f8812be38ff5dfc2bf8488e757077ebae1791be | /apps/askfm/migrations/0004_question_anonymous.py | 1478e6682af18fbd9cfb3e792902816f02d557ee | [
"MIT"
] | permissive | Morsa11/AskFmClone | d51e28a2568a2678af488fcbda63c2b1a23943e3 | 50ded5126926989627b7aa0fb445da5a8a4a5d68 | refs/heads/master | 2020-04-25T21:46:03.899930 | 2016-12-13T07:51:57 | 2016-12-13T07:51:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``anonymous`` flag (default False) to the Question model."""
    dependencies = [
        ('askfm', '0003_auto_20160823_0038'),
    ]
    operations = [
        migrations.AddField(
            model_name='question',
            name='anonymous',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"shakib609@gmail.com"
] | shakib609@gmail.com |
9398a3090703eb99a86009ee5f9c25b5465dcd51 | 209aae9f40657d48461bed5e081c4f235f86090a | /2019/day2-2.py | 6027822c550d9b2f5a284bd7e34efa848f918fef | [] | no_license | scheidguy/Advent_of_Code | 6e791132157179928e1415f49467ad221ef1e258 | fbc09e4d26502b9a77e0c8d2840b11ec85a3c478 | refs/heads/main | 2023-03-05T12:34:15.343642 | 2021-02-20T00:27:58 | 2021-02-20T00:27:58 | 329,106,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py |
import copy
f = open('day2-1_input.txt')
# f = open('day2-1_debug.txt')
text = f.readlines()
f.close()
program = [int(i) for i in text[0].strip().split(',')]
target = 19690720
P = copy.deepcopy(program)
for noun in range(100):
for verb in range(100):
program = copy.deepcopy(P)
program[1] = noun
program[2] = verb
pos = 0
while program[pos] != 99:
first = program[program[pos+1]]
second = program[program[pos+2]]
ind = program[pos+3]
if program[pos] == 1:
program[ind] = first + second
elif program[pos] == 2:
program[ind] = first * second
else: print('ERROR');break
pos += 4
if program[0] == target: break
if program[0] == target: break
print(100*noun + verb)
| [
"scheidguy@gmail.com"
] | scheidguy@gmail.com |
ab23504080ede563743c2867277a27dba9d1b2c4 | 065acd70109d206c4021954e68c960a631a6c5e3 | /shot_detector/filters/dsl/filter_condition_features.py | 0f001a19c2e2cb1545711dbabebd2c822dfea2c6 | [] | permissive | w495/python-video-shot-detector | bf2e3cc8175687c73cd01cf89441efc349f58d4d | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | refs/heads/master | 2022-12-12T02:29:24.771610 | 2017-05-15T00:38:22 | 2017-05-15T00:38:22 | 37,352,923 | 20 | 3 | BSD-3-Clause | 2022-11-22T01:15:45 | 2015-06-13T01:33:27 | Python | UTF-8 | Python | false | false | 1,779 | py | # -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import logging
from shot_detector.filters.base.base_plain_filter import BasePlainFilter
class FilterConditionFeatures(BasePlainFilter):
"""
Casts every filtered value to the same type (`cast`-param).
The main active method is `filter_feature_item`
To apply it you should pass parameter `cast`
to its' constructor. cast should be an a callable object
"""
__logger = logging.getLogger(__name__)
def filter_features(self,
features,
condition=None,
apply=None,
**kwargs):
"""
:param features:
:param condition:
:param apply:
:param kwargs:
:return:
"""
for feature in features:
if condition and condition(feature):
yield apply(feature)
yield feature
def filter_feature_item(self,
feature,
condition=None,
apply=None,
**_):
"""
:param feature:
:param condition:
:param apply:
:return:
"""
if condition and condition(feature):
feature = apply(feature)
return feature
# noinspection PyUnusedLocal
@staticmethod
def _apply_filter_operator(first,
second,
operator=None,
**_):
if first is False:
return second
| [
"w@w-495.ru"
] | w@w-495.ru |
374f2018883157cf160b2f2a8718c53e847003ed | 52151d0ae89622ffd5dcecdb626feb1f44e53761 | /lists/views.py | dfd1e1823872187eff8e4f38334b68962bfe4e13 | [] | no_license | jms7446/python-tdd | e77ef943fc50c0e8f9f6adb89cf0d2b47b022eb7 | 0fe47ecc0c66d302d361af39b7dc84f4915a411e | refs/heads/master | 2022-05-11T01:19:47.230300 | 2019-05-26T15:21:36 | 2019-05-26T15:21:36 | 95,457,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | from django.shortcuts import render, redirect
from django.http import HttpRequest
from django.core.exceptions import ValidationError
from lists.models import Item, List
from lists.forms import ItemForm, ExistingListItemForm
def home_page(request: HttpRequest):
return render(request, 'home.html', context={'form': ItemForm()})
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
form = ExistingListItemForm(for_list=list_)
if request.method == 'POST':
form = ExistingListItemForm(for_list=list_, data=request.POST)
if form.is_valid():
form.save()
return redirect(list_)
return render(request, 'list.html', {'list': list_, 'form': form})
def new_list(request):
form = ItemForm(data=request.POST)
if form.is_valid():
list_ = List.objects.create()
form.set_list(list_)
form.save()
return redirect(list_)
else:
return render(request, 'home.html', context={'form': form})
| [
"jms7446@gmail.com"
] | jms7446@gmail.com |
c4f62f947dcc44df833367e426cdf7e6301a8eb5 | af7bc5841fd980c09da27c69dbd0cee3a9eb402a | /shop/migrations/0016_auto_20201117_2137.py | 57c8594ed7da9785c2eafce2f33406abeeea1bf4 | [] | no_license | homutovan/Django-diplom | 35c78f39f5fcdfeec7005e039242c7f4e6b19cef | 72f9f2dd49d2c760cee8cfe2609b278f8688cacc | refs/heads/master | 2023-01-14T10:54:22.498027 | 2020-11-20T20:51:48 | 2020-11-20T20:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Generated by Django 2.2.10 on 2020-11-17 21:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0015_auto_20201116_2341'),
]
operations = [
migrations.AlterField(
model_name='order',
name='amount_goods',
field=models.IntegerField(default=0, verbose_name='Количество товара'),
),
]
| [
"Smikhalcv@yandex.ru"
] | Smikhalcv@yandex.ru |
f3201f180930a559c2e0b5616789f4d32b47e9f5 | 1032ebbc585d0f9de33247ba6f30e2ffc8916aee | /slidescript/antlr3/dfa.py | bd923fbc85c5fca9c9724e0ea0ba9ebfffb724de | [] | no_license | mdornseif/Slidescript | 98db95cd55bc9838836b786cca1a4db18bb62375 | 60cc24049b75222edd4046afa08f6d1252709b33 | refs/heads/master | 2021-12-29T17:44:54.845035 | 2010-04-12T09:58:54 | 2010-04-12T09:58:54 | 544,207 | 0 | 0 | null | 2021-12-17T19:45:45 | 2010-03-03T08:02:10 | Python | UTF-8 | Python | false | false | 7,655 | py | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licensc]
from slidescript.antlr3.constants import EOF
from slidescript.antlr3.exceptions import NoViableAltException, BacktrackingFailed
class DFA(object):
"""@brief A DFA implemented as a set of transition tables.
Any state that has a semantic predicate edge is special; those states
are generated with if-then-else structures in a specialStateTransition()
which is generated by cyclicDFA template.
"""
def __init__(
self,
recognizer, decisionNumber,
eot, eof, min, max, accept, special, transition
):
## Which recognizer encloses this DFA? Needed to check backtracking
self.recognizer = recognizer
self.decisionNumber = decisionNumber
self.eot = eot
self.eof = eof
self.min = min
self.max = max
self.accept = accept
self.special = special
self.transition = transition
def predict(self, input):
"""
From the input stream, predict what alternative will succeed
using this DFA (representing the covering regular approximation
to the underlying CFL). Return an alternative number 1..n. Throw
an exception upon error.
"""
mark = input.mark()
s = 0 # we always start at s0
try:
for _ in xrange(50000):
#print "***Current state = %d" % s
specialState = self.special[s]
if specialState >= 0:
#print "is special"
s = self.specialStateTransition(specialState, input)
if s == -1:
self.noViableAlt(s, input)
return 0
input.consume()
continue
if self.accept[s] >= 1:
#print "accept state for alt %d" % self.accept[s]
return self.accept[s]
# look for a normal char transition
c = input.LA(1)
#print "LA = %d (%r)" % (c, unichr(c) if c >= 0 else 'EOF')
#print "range = %d..%d" % (self.min[s], self.max[s])
if c >= self.min[s] and c <= self.max[s]:
# move to next state
snext = self.transition[s][c-self.min[s]]
#print "in range, next state = %d" % snext
if snext < 0:
#print "not a normal transition"
# was in range but not a normal transition
# must check EOT, which is like the else clause.
# eot[s]>=0 indicates that an EOT edge goes to another
# state.
if self.eot[s] >= 0: # EOT Transition to accept state?
#print "EOT trans to accept state %d" % self.eot[s]
s = self.eot[s]
input.consume()
# TODO: I had this as return accept[eot[s]]
# which assumed here that the EOT edge always
# went to an accept...faster to do this, but
# what about predicated edges coming from EOT
# target?
continue
#print "no viable alt"
self.noViableAlt(s, input)
return 0
s = snext
input.consume()
continue
if self.eot[s] >= 0:
#print "EOT to %d" % self.eot[s]
s = self.eot[s]
input.consume()
continue
# EOF Transition to accept state?
if c == EOF and self.eof[s] >= 0:
#print "EOF Transition to accept state %d" \
# % self.accept[self.eof[s]]
return self.accept[self.eof[s]]
# not in range and not EOF/EOT, must be invalid symbol
self.noViableAlt(s, input)
return 0
else:
raise RuntimeError("DFA bang!")
finally:
input.rewind(mark)
def noViableAlt(self, s, input):
if self.recognizer._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException(
self.getDescription(),
self.decisionNumber,
s,
input
)
self.error(nvae)
raise nvae
def error(self, nvae):
"""A hook for debugging interface"""
pass
def specialStateTransition(self, s, input):
return -1
def getDescription(self):
return "n/a"
## def specialTransition(self, state, symbol):
## return 0
def unpack(cls, string):
"""@brief Unpack the runlength encoded table data.
Terence implemented packed table initializers, because Java has a
size restriction on .class files and the lookup tables can grow
pretty large. The generated JavaLexer.java of the Java.g example
would be about 15MB with uncompressed array initializers.
Python does not have any size restrictions, but the compilation of
such large source files seems to be pretty memory hungry. The memory
consumption of the python process grew to >1.5GB when importing a
15MB lexer, eating all my swap space and I was to impacient to see,
if it could finish at all. With packed initializers that are unpacked
at import time of the lexer module, everything works like a charm.
"""
ret = []
for i in range(len(string) / 2):
(n, v) = ord(string[i*2]), ord(string[i*2+1])
# Is there a bitwise operation to do this?
if v == 0xFFFF:
v = -1
ret += [v] * n
return ret
unpack = classmethod(unpack)
| [
"md@hudora.de"
] | md@hudora.de |
274b0189eedce0785051ebff12043aba0b2e200d | 80593bc3dd02e80381b801f96820b28e82d9641c | /lib/deprecated/softphone2.py | c7616fd09893b6e457ce124a6636d965e353d5e8 | [] | no_license | mccrorey48/mtaf_private | 39045c1a4b5288b9b9340e29b419590c3beba6bf | 0c65aaedca5189a377a78776f52773eac5645bfa | refs/heads/master | 2023-04-11T08:22:47.455990 | 2018-04-30T18:20:14 | 2018-04-30T18:20:14 | 105,019,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,833 | py | # softphone class that uses simple_pj
import random
import re
from time import time, sleep
import lib.logging_esi as logging_esi
from lib.wrappers import Trace
import lib.softphone.simple_pj as pj
from lib.softphone.wav_audio import create_wav_file
from lib.user_exception import UserException as Ux, UserTimeoutException as Tx
log = logging_esi.get_logger('esi.softphone2')
class Softphone:
lib = None
pbfile = None
dst_uri = None
rec_id = None
rec_slot = None
@Trace(log)
def __init__(self, uri, proxy, password, null_snd=True, dns_list=None, tcp=False,
pbfile=None, rec=True, quiet=True):
self.uri = uri
self.pbfile = pbfile
if not self.lib:
Softphone.lib = pj.PjsuaLib()
self.lib.start(null_snd=null_snd, dns_list=dns_list, tcp=tcp)
if self.pbfile:
create_wav_file(self.pbfile, quiet)
m = re.match('sip:([^@]+)@(.+)', self.uri)
if m:
self.lib.add_account(m.group(1), m.group(2), proxy, password)
self.account_info = pj.account_infos[self.uri]
@Trace(log)
def wait_for_call_status(self, desired_status, timeout=30):
# possible desired_status values: 'call', 'idle', 'early', 'hold'
start = time()
while time() - start < timeout:
if self.account_info.call_status == desired_status:
return time() - start
sleep(0.1)
if self.account_info.call_status == 'call' and desired_status == 'early':
self.teardown_call()
raise Ux('wait for call status "early" terminated call because status was "call"')
else:
raise Tx('wait for call status "%s" timed out after %s seconds' % (desired_status, timeout))
@Trace(log)
def make_call(self, dst_uri):
self.dst_uri = dst_uri
if self.account_info.reg_status != 200:
raise Ux("Can't set up call, registration status (src) %s" % self.account_info.reg_status)
log.debug("%s calling %s" % (self.uri, self.dst_uri))
# print self.dst_uri
self.account_info.call = self.account_info.account.make_call_to_softphone(self.dst_uri)
self.account_info.call.set_callback(pj.MyCallCallback(self.account_info))
@Trace(log)
def end_call(self):
if not self.account_info.call:
raise Ux("end_call(): %s not in call" % self.uri)
log.debug("%s ending call to %s" % (self.uri, self.dst_uri))
self.account_info.call.hangup()
@Trace(log)
def leave_msg(self, length=None):
if not self.account_info.call:
raise Ux("leave_msg(): %s not in call" % self.uri)
sleep(10)
self.account_info.call.dial_dtmf('2')
if length is None:
random.seed(time())
length = random.randrange(10, 30, 1)
sleep(length)
def teardown_call(self):
if self.account_info.call:
self.account_info.call.hangup()
log.debug("%s hanging up" % self.uri)
log.debug("calling wait_for_call_status(%s, 'end', 15)" % self.uri)
self.wait_for_call_status('disconnected', 15)
@Trace(log)
def dial_dtmf(self, dtmf_string):
if self.account_info.call:
for c in list(dtmf_string):
log.debug('%s:send dtmf %s' % (self.uri, c))
self.account_info.call.dial_dtmf(c)
sleep(0.3)
@Trace(log)
def set_monitor_on(self):
pass
@Trace(log)
def set_monitor_off(self):
pass
@Trace(log)
def connect_media(self):
if self.rec_id is None:
raise Ux("connect_media: no media exists")
self.rec_slot = self.lib.recorder_get_slot(self.rec_id)
my_uri = self.call.info().account.info().uri
# self.media_call_slot is set to the call's conference slot when connecting media,
# and set to None when disconnecting, so if it is not None, this is a reconnect
if self.media_call_slot is not None:
# if self.media_call_slot is not None but is not the current call's conference slot,
# it isn't a reconnect, it's a structural program error
if self.media_call_slot != self.call.info().conf_slot:
raise Ux("connect_media: call at slot %d media already connected to call slot %d"
% (self.call.info().conf_slot, self.media_call_slot))
log.debug("%s: disconnecting call slot %d from recorder %s at slot %d"
% (my_uri, self.media_call_slot, self.rec_id, self.rec_slot))
lib.conf_disconnect(self.media_call_slot, self.rec_slot)
if self.player_id is not None:
self.pb_slot = lib.player_get_slot(self.player_id)
log.debug("%s: disconnecting player %s at slot %d to call slot %d"
% (my_uri, self.player_id, self.pb_slot, self.media_call_slot))
lib.conf_disconnect(self.pb_slot, self.media_call_slot)
self.media_call_slot = None
log.debug("%s: connecting call slot %d to recorder %s at slot %d"
% (my_uri, self.call.info().conf_slot, self.rec_id, self.rec_slot))
lib.conf_connect(self.call.info().conf_slot, self.rec_slot)
# if there is a player ID then the player was created during create_media and we can connect it, too
if self.player_id is not None:
self.pb_slot = lib.player_get_slot(self.player_id)
log.debug("%s: connecting player %s at slot %d to call slot %d"
% (my_uri, self.player_id, self.pb_slot, self.call.info().conf_slot))
lib.conf_connect(self.pb_slot, self.call.info().conf_slot)
self.media_call_slot = self.call.info().conf_slot
| [
"mmccrorey@esi-estech.com"
] | mmccrorey@esi-estech.com |
bda426d73a162db938d51aa0011a0c12b47e5d89 | 97caa124ffa5da9819c39a16c734165176d90349 | /projects/ideas/api/nba/nba_players.py | adde586a9f8bcb7246108819c3a5de18ab7919ce | [
"Apache-2.0"
] | permissive | YAtOff/python0 | dd684731065321fd52d475fd2b2105db59f5c19c | b5af5004131d64dd52d42746eddb72b6c43a13c7 | refs/heads/master | 2021-01-18T21:19:11.990434 | 2019-05-29T20:14:23 | 2019-05-29T20:14:23 | 44,601,010 | 6 | 7 | Apache-2.0 | 2019-10-31T22:45:21 | 2015-10-20T11:13:11 | Jupyter Notebook | UTF-8 | Python | false | false | 715 | py | """
NBA API
https://pypi.org/project/nba-api/
Преди да започнете инсталирайте:
pip install nba_api
https://github.com/swar/nba_api
"""
from nba_api.stats.static import players
from nba_api.stats.endpoints import commonplayerinfo
name = ""
while name != "exit":
name = input("Player name: ")
result = players.find_players_by_full_name(name)
if result:
player = result[0]
player_info = commonplayerinfo.CommonPlayerInfo(player_id=player["id"])
table = player_info.common_player_info.get_dict()
for name, value in zip(table["headers"], table["data"][0]):
print(name, value)
else:
print("No player found! Try again.")
| [
"yavor.atov@gmail.com"
] | yavor.atov@gmail.com |
2564ea2977644b8d4ec91ec350761e03c7cfff6f | f9e3a0fb511470561d3d94bc984dafaee06000cb | /9780596009250/PP3E-Examples-1.2/Examples/PP3E/System/Filetools/site-forward.py | 43c772418aad4c045863a31bed676eaa7d153913 | [
"LicenseRef-scancode-oreilly-notice"
] | permissive | Sorath93/Programming-Python-book | 359b6fff4e17b44b9842662f484bbafb490cfd3d | ebe4c93e265edd4ae135491bd2f96904d08a911c | refs/heads/master | 2022-12-03T01:49:07.815439 | 2020-08-16T22:19:38 | 2020-08-16T22:19:38 | 287,775,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | ###########################################################################
# Create forward link pages for relocating a web site.
# Generates one page for every existing site file; upload the generated
# files to your old web site. Performance note: the first 2 str.replace
# calls could be moved out of the for loop, but this runs in < 1 second
# on my Win98 machine for 150 site files. Lib note: the os.listdir call
# can be replaced with: sitefiles = glob.glob(sitefilesdir + os.sep + '*')
# but then the file/directory names must be split up manually with:
# dirname, filename = os.path.split(sitefile);
###########################################################################
import os
servername = 'starship.python.net' # where site is relocating to
homedir = '~lutz/home' # where site will be rooted
sitefilesdir = 'public_html' # where site files live locally
uploaddir = 'isp-forward' # where to store forward files
templatename = 'template.html' # template for generated pages
try:
os.mkdir(uploaddir) # make upload dir if needed
except OSError: pass
template = open(templatename).read() # load or import template text
sitefiles = os.listdir(sitefilesdir) # filenames, no directory prefix
count = 0
for filename in sitefiles:
fwdname = os.path.join(uploaddir, filename) # or + os.sep + filename
print 'creating', filename, 'as', fwdname
filetext = template.replace('$server$', servername) # insert text
filetext = filetext.replace('$home$', homedir) # and write
filetext = filetext.replace('$file$', filename) # file varies
open(fwdname, 'w').write(filetext)
count += 1
print 'Last file =>\n', filetext
print 'Done:', count, 'forward files created.'
| [
"Sorath.Soomro@isode.com"
] | Sorath.Soomro@isode.com |
ef153fa9651dace4d24ab5d1475eee7afaf808cb | 6cd690fb01e100f440289ea8fe7342bb58d37e78 | /tests/elemental/combat_elemental_tests.py | 1e175b4d862b30ee622ca4db805356341ff523d9 | [] | no_license | Hammerlord/Monbot | 6db8308ae492d7cfbb6f1bdff909105129924269 | fde8177d9170dddd958a89068a560008259d6e24 | refs/heads/master | 2020-03-07T16:43:20.019123 | 2019-08-29T03:08:33 | 2019-08-29T03:08:33 | 127,591,188 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,556 | py | import unittest
from src.elemental.ability.abilities.wait import Wait
from src.elemental.ability.ability import Ability
from src.elemental.combat_elemental import CombatElemental
from src.elemental.elemental import Elemental
from tests.elemental.elemental_builder import CombatElementalBuilder, ElementalBuilder
class CombatElementalTests(unittest.TestCase):
"""
Tests for CombatElemental, the wrapper class generated when an Elemental enters combat.
"""
def setUp(self):
self.elemental = self.get_elemental()
self.combat_elemental = CombatElementalBuilder().with_elemental(self.elemental).build()
def tearDown(self):
self.combat_elemental = None
self.elemental = None
def get_elemental(self) -> Elemental:
return ElementalBuilder() \
.with_current_hp(5) \
.with_max_hp(50) \
.build()
def get_combat_elemental(self) -> CombatElemental:
return CombatElementalBuilder().with_elemental(self.get_elemental()).build()
def test_starting_mana(self):
error = "CombatElemental didn't have the correct amount of starting mana"
combat_elemental_mana = self.combat_elemental.current_mana
expected_mana = self.elemental.starting_mana
self.assertEqual(combat_elemental_mana, expected_mana, error)
def test_starting_hp(self):
error = "CombatElemental's HP didn't refer to its Elemental's HP"
combat_elemental_hp = self.combat_elemental.current_hp
expected_hp = self.elemental.current_hp
self.assertEqual(combat_elemental_hp, expected_hp, error)
def test_defend_charges(self):
error = "CombatElemental's Defend charges didn't refer to its Elemental's"
combat_elemental_charges = self.combat_elemental.defend_charges
min_charges = 2 # All Elementals have at least two Defend charges
self.assertGreaterEqual(combat_elemental_charges, min_charges, error)
def test_defend_available(self):
error = "CombatElemental didn't have defend available as an ability"
abilities = self.combat_elemental.abilities
has_defend = any([ability for ability in abilities if ability.name == "Defend"])
self.assertTrue(has_defend, error)
def test_defend_unavailable(self):
error = "Defend was available even though there were no defend charges"
self.combat_elemental.update_defend_charges(-self.combat_elemental.defend_charges)
abilities = self.combat_elemental.available_abilities
has_defend = any([ability for ability in abilities if ability.name == "Defend"])
self.assertFalse(has_defend, error)
def test_has_abilities(self):
error = "CombatElemental doesn't have Abilities"
abilities = self.combat_elemental.abilities
self.assertGreater(len(abilities), 0, error)
self.assertIsInstance(abilities[0], Ability, error)
def test_bide_available(self):
error = "Bide wasn't available when there were no other usable abilities"
self.combat_elemental._abilities = []
self.assertIsInstance(self.combat_elemental.available_abilities[0], Wait, error)
def test_bide_unavailable(self):
error = "Bide shouldn't be available if anything else is available"
is_bide_available = any([ability for ability in self.combat_elemental.available_abilities
if ability.name == Wait().name])
self.assertFalse(is_bide_available, error)
def test_take_damage(self):
error = "Reference Elemental didn't take damage when CombatElemental took damage"
prev_hp = self.elemental.current_hp
self.combat_elemental.receive_damage(2, self.get_combat_elemental())
current_hp = self.elemental.current_hp
expected_hp = prev_hp - 2
self.assertEqual(current_hp, expected_hp, error)
def test_heal(self):
error = "Reference Elemental didn't heal when CombatElemental healed"
prev_hp = self.elemental.current_hp
self.combat_elemental.heal(5)
current_hp = self.elemental.current_hp
expected_hp = prev_hp + 5
self.assertEqual(current_hp, expected_hp, error)
def test_stat_change(self):
error = "Reference Elemental's stats incorrectly changed when CombatElemental's stats changed"
# TODO
def test_overkill(self):
error = "Elemental's HP didn't set to 0 on overkill"
self.combat_elemental.receive_damage(200, self.get_combat_elemental())
current_hp = self.elemental.current_hp
expected_hp = 0
self.assertEqual(current_hp, expected_hp, error)
def test_overheal(self):
error = "Elemental's HP didn't set to max HP on overheal"
self.combat_elemental.heal(100)
current_hp = self.elemental.current_hp
expected_hp = 50
self.assertEqual(current_hp, expected_hp, error)
def test_knockout_flag(self):
error = "CombatElemental wasn't flagged as knocked out at 0 HP"
self.combat_elemental.receive_damage(12, self.get_combat_elemental())
knocked_out = self.combat_elemental.is_knocked_out
self.assertIs(knocked_out, True, error)
def test_gain_mana(self):
error = "CombatElemental didn't gain mana on turn start"
mana_before_turn = self.combat_elemental.current_mana
self.combat_elemental.start_turn()
mana_after_turn = self.combat_elemental.current_mana
self.assertGreater(mana_after_turn, mana_before_turn, error)
| [
"nepharus@gmail.com"
] | nepharus@gmail.com |
30a1e31371ef290579d9c7c19f8771ad60bf07c6 | 6e631bd7f138abb9f7eb0d936a8615287248b697 | /Home/DaysBetween.py | 76d67e4c8f9b064e75254e0764b99006c53ced5b | [] | no_license | ankiwoong/Check_Io | 24494390a851fad91f173c5e81a4eedfad7cfe6e | f417dbf1c1cce316ca25d51d645e228e7b03bf9c | refs/heads/master | 2022-06-03T14:59:04.571112 | 2020-04-25T11:29:55 | 2020-04-25T11:29:55 | 254,514,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | '''
Days Between
We have prepared a set of Editor's Choice Solutions. You will see them first after you solve the mission. In order to see all other solutions you should change the filter.
How old are you in a number of days? It's easy to calculate - just subtract your birthday from today. We could make this a real challenge though and count the difference between any dates.
You are given two dates as an array with three numbers - a year, month and day. For example: 19 April 1982 will be (1982, 4, 19). You should find the difference in days between the given dates. For example between today and tomorrow = 1 day. The difference will always be either a positive number or zero, so don't forget about the absolute value.
Input:
Two dates as tuples of integers.
Output:
The difference between the dates in days as an integer.
Example:
days_diff((1982, 4, 19), (1982, 4, 22)) == 3
days_diff((2014, 1, 1), (2014, 8, 27)) == 238
How it is used: Python has batteries included, so in this mission you’ll need to learn how to use completed modules so that you don't have to invent the bicycle all over again.
Precondition:
Dates between 1 january 1 and 31 december 9999. Dates are correct.
def days_diff(a, b):
# your code here
return None
if __name__ == '__main__':
print("Example:")
print(days_diff((1982, 4, 19), (1982, 4, 22)))
# These "asserts" are used for self-checking and not for an auto-testing
assert days_diff((1982, 4, 19), (1982, 4, 22)) == 3
assert days_diff((2014, 1, 1), (2014, 8, 27)) == 238
assert days_diff((2014, 8, 27), (2014, 1, 1)) == 238
print("Coding complete? Click 'Check' to earn cool rewards!")
'''
from datetime import datetime
def days_diff(date1, date2):
# datetime를 사용하여서 년 월 일을 지정한다.
# 각 년 / 월 / 일은 입력되는 인덱싱 번호로 추출한다.
# 출력값은 year-month-day 00:00:00 로 나온다.
date1 = datetime(year=date1[0], month=date1[1], day=date1[2])
date2 = datetime(year=date2[0], month=date2[1], day=date2[2])
# date2에서 date1의 날짜를 빼면 되므로 절대값을 줘서 계산 후 반환 한다.
return abs((date2 - date1).days)
if __name__ == '__main__':
print("Example:")
print(days_diff((1982, 4, 19), (1982, 4, 22)))
# These "asserts" are used for self-checking and not for an auto-testing
assert days_diff((1982, 4, 19), (1982, 4, 22)) == 3
assert days_diff((2014, 1, 1), (2014, 8, 27)) == 238
assert days_diff((2014, 8, 27), (2014, 1, 1)) == 238
print("Coding complete? Click 'Check' to earn cool rewards!")
| [
"ankiwoong@gmail.com"
] | ankiwoong@gmail.com |
de53426f1c73e86d0c2bf22d218d124d89f7947c | f9033131dc4d66ede2c5c22fcaa4a0be5b682152 | /BinaryTrees/Tasks/eolymp(3326).py | 504d4e87e8e265aa065e0b26f1c53ee7288a6ca3 | [] | no_license | Invalid-coder/Data-Structures-and-algorithms | 9bd755ce3d4eb11e605480db53302096c9874364 | 42c6eb8656e85b76f1c0043dcddc9c526ae12ba1 | refs/heads/main | 2023-04-29T08:40:34.661184 | 2021-05-19T10:57:37 | 2021-05-19T10:57:37 | 301,458,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,782 | py | class BinaryTree:
def __init__(self, key):
self.key = key
self.leftChild = None
self.rightChild = None
def hasLeft(self):
return not self.leftChild is None
def hasRight(self):
return not self.rightChild is None
def setLeft(self, key):
self.leftChild = BinaryTree(key)
def setRight(self, key):
self.rightChild = BinaryTree(key)
def insert(self, key):
node = self
while True:
if node.key == key:
break
elif node.key > key:
if node.hasLeft():
node = node.leftChild
else:
node.setLeft(key)
break
elif node.key < key:
if node.hasRight():
node = node.rightChild
else:
node.setRight(key)
break
def isSameTree(self, tree):
ans = True
def _isSameTree(node1, node2):
nonlocal ans
if node1.hasLeft() and node2.hasLeft():
_isSameTree(node1.leftChild, node2.leftChild)
else:
if node1.hasLeft() and not node2.hasLeft():
ans = False
elif node2.hasLeft() and not node1.hasLeft():
ans = False
if node1.hasRight() and node2.hasRight():
_isSameTree(node1.rightChild, node2.rightChild)
else:
if node1.hasRight() and not node2.hasRight():
ans = False
elif node2.hasRight() and not node1.hasRight():
ans = False
_isSameTree(self, tree)
return ans
def createTree(nodes):
tree = BinaryTree(nodes[0])
i = 1
while i < len(nodes):
tree.insert(nodes[i])
i += 1
return tree
def findSequences(tree, n, m):
sequences = []
def _findSequences(sequence):
if len(sequence) == n:
tree1 = createTree(sequence)
if tree.isSameTree(tree1):
if not sequence in sequences:
sequences.append(sequence)
return
for i in range(1, m + 1):
if not i in sequence:
next_seq = sequence[:]
next_seq.append(i)
_findSequences(next_seq)
_findSequences([])
return len(sequences)
if __name__ == '__main__':
t = int(input())
for _ in range(t):
n, m = map(int, input().split())
nodes = tuple(map(int, input().split()))
tree = BinaryTree(nodes[0])
i = 1
while i < len(nodes):
tree.insert(nodes[i])
i += 1
print(findSequences(tree, n, m))
| [
"gusevvovik@gmail.com"
] | gusevvovik@gmail.com |
60e71440b4fa46560d11572b5594307fa09e7b55 | e7af5a3e76e674be0a85628067fa494348d45123 | /Python-for-Finance-Second-Edition-master/Chapter12/c12_28_basic_income_best.py | 360b70b1ba061b5ab73780ecb6b715663f4dfd9d | [
"MIT"
] | permissive | SeyedShobeiri/Work | 8321ead6f11de8297fa18d70a450602f700f26fb | f758e758106fbd53236a7fadae42e4ec6a4e8244 | refs/heads/master | 2022-07-25T02:33:25.852521 | 2020-05-17T16:11:27 | 2020-05-17T16:11:27 | 264,706,380 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,820 | py |
"""
Name : c12_28_basic_income_best.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
original : https://gist.github.com/stucchio/7447067
"""
from pylab import *
from scipy.stats import *
#input area
million=1e6 # unit of million
billion=1e9 # unit of billion
trillion=1e12 # unit of trillion
tiny=1e-7 # a small number
hourlyPay = 7.5 # hourly wage
workingHoursPerWeek=40 # working hour per week
workingWeeksPerYear=50 # working weeks per year
nAdult = 227*million # number of adult
laborForce = 154*million # labor force
disabledAdults = 21*million # disability
nSimulations = 1024*32 # number of simulations
#
basicIncome = hourlyPay*workingHoursPerWeek*workingWeeksPerYear
# define a few function
def geniusEffect(nNonWorkers):
nGenious = binom(nNonWorkers,tiny).rvs()
return nGenious* billion
#
def costBasicIncome():
salaryCost= nAdult * basicIncome
unitAdmCost = norm(250,75)
nonWorkerMultiplier = uniform(-0.10, 0.15).rvs()
nonWorker0=nAdult-laborForce-disabledAdults
nNonWorker = nonWorker0*(1+nonWorkerMultiplier)
marginalWorkerHourlyProductivity = norm(10,1)
admCost = nAdult * unitAdmCost.rvs()
unitBenefitNonWorker=40*52*marginalWorkerHourlyProductivity.rvs()
benefitNonWorkers = 1 * (nNonWorker*unitBenefitNonWorker)
geniusBenefit=geniusEffect(nNonWorker)
totalCost=salaryCost + admCost - benefitNonWorkers-geniusBenefit
return totalCost
#
def costBasicJob():
unitAdmCost4disabled= norm(500,150).rvs()
unitAdmCost4worker = norm(5000, 1500).rvs()
nonWorkerMultiplier = uniform(-0.20, 0.25).rvs()
hourlyProductivity = uniform(0.0, hourlyPay).rvs()
cost4disabled=disabledAdults * (basicIncome + unitAdmCost4disabled)
nBasicWorkers=((nAdult-disabledAdults-laborForce)*(1+nonWorkerMultiplier))
annualCost=workingHoursPerWeek*workingWeeksPerYear*hourlyProductivity
cost4workers=nBasicWorkers * (basicIncome+unitAdmCost4worker-annualCost)
return cost4disabled + cost4workers
#
# take a long time here!!!
N = nSimulations
costBI = zeros(shape=(N,),dtype=float)
costBJ = zeros(shape=(N,),dtype=float)
for k in range(N):
costBI[k] = costBasicIncome()
costBJ[k] = costBasicJob()
#
def myPlot(data,myTitle,key):
subplot(key)
width = 4e12
height=50*N/1024
title(myTitle)
#xlabel("Cost (Trillion = 1e12)")
hist(data, bins=50)
axis([0,width,0,height])
#
myPlot(costBI,"Basic Income",211)
myPlot(costBJ,"Basic Job",212)
show()
| [
"shobeiri@math.uh.edu"
] | shobeiri@math.uh.edu |
1cf9bc616d68317e6e54b595f5fa04659ec0aa69 | c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105 | /vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/third_party/2/kazoo/client.pyi | 683596f0fc6c615216bdead3db0ebdda388f94a5 | [
"MIT",
"Apache-2.0"
] | permissive | ryangniadek/.dotfiles | ddf52cece49c33664b56f01b17d476cf0f1fafb1 | be272baf6fb7d7cd4f4db1f6812b710196511ffe | refs/heads/master | 2021-01-14T07:43:12.516127 | 2020-03-22T20:27:22 | 2020-03-22T20:27:22 | 242,632,623 | 0 | 0 | MIT | 2020-09-12T17:28:01 | 2020-02-24T02:50:06 | Python | UTF-8 | Python | false | false | 3,879 | pyi | from typing import Any
string_types = ... # type: Any
bytes_types = ... # type: Any
LOST_STATES = ... # type: Any
ENVI_VERSION = ... # type: Any
ENVI_VERSION_KEY = ... # type: Any
log = ... # type: Any
class KazooClient:
logger = ... # type: Any
handler = ... # type: Any
auth_data = ... # type: Any
default_acl = ... # type: Any
randomize_hosts = ... # type: Any
hosts = ... # type: Any
chroot = ... # type: Any
state = ... # type: Any
state_listeners = ... # type: Any
read_only = ... # type: Any
retry = ... # type: Any
Barrier = ... # type: Any
Counter = ... # type: Any
DoubleBarrier = ... # type: Any
ChildrenWatch = ... # type: Any
DataWatch = ... # type: Any
Election = ... # type: Any
NonBlockingLease = ... # type: Any
MultiNonBlockingLease = ... # type: Any
Lock = ... # type: Any
Party = ... # type: Any
Queue = ... # type: Any
LockingQueue = ... # type: Any
SetPartitioner = ... # type: Any
Semaphore = ... # type: Any
ShallowParty = ... # type: Any
def __init__(self, hosts=..., timeout=..., client_id=..., handler=..., default_acl=..., auth_data=..., read_only=..., randomize_hosts=..., connection_retry=..., command_retry=..., logger=..., **kwargs) -> None: ...
@property
def client_state(self): ...
@property
def client_id(self): ...
@property
def connected(self): ...
def set_hosts(self, hosts, randomize_hosts=...): ...
def add_listener(self, listener): ...
def remove_listener(self, listener): ...
def start(self, timeout=...): ...
def start_async(self): ...
def stop(self): ...
def restart(self): ...
def close(self): ...
def command(self, cmd=...): ...
def server_version(self, retries=...): ...
def add_auth(self, scheme, credential): ...
def add_auth_async(self, scheme, credential): ...
def unchroot(self, path): ...
def sync_async(self, path): ...
def sync(self, path): ...
def create(self, path, value=..., acl=..., ephemeral=..., sequence=..., makepath=...): ...
def create_async(self, path, value=..., acl=..., ephemeral=..., sequence=..., makepath=...): ...
def ensure_path(self, path, acl=...): ...
def ensure_path_async(self, path, acl=...): ...
def exists(self, path, watch=...): ...
def exists_async(self, path, watch=...): ...
def get(self, path, watch=...): ...
def get_async(self, path, watch=...): ...
def get_children(self, path, watch=..., include_data=...): ...
def get_children_async(self, path, watch=..., include_data=...): ...
def get_acls(self, path): ...
def get_acls_async(self, path): ...
def set_acls(self, path, acls, version=...): ...
def set_acls_async(self, path, acls, version=...): ...
def set(self, path, value, version=...): ...
def set_async(self, path, value, version=...): ...
def transaction(self): ...
def delete(self, path, version=..., recursive=...): ...
def delete_async(self, path, version=...): ...
def reconfig(self, joining, leaving, new_members, from_config=...): ...
def reconfig_async(self, joining, leaving, new_members, from_config): ...
class TransactionRequest:
client = ... # type: Any
operations = ... # type: Any
committed = ... # type: Any
def __init__(self, client) -> None: ...
def create(self, path, value=..., acl=..., ephemeral=..., sequence=...): ...
def delete(self, path, version=...): ...
def set_data(self, path, value, version=...): ...
def check(self, path, version): ...
def commit_async(self): ...
def commit(self): ...
def __enter__(self): ...
def __exit__(self, exc_type, exc_value, exc_tb): ...
class KazooState:
...
| [
"ryan@gniadek.net"
] | ryan@gniadek.net |
83ebf0a66825f6e61ab543b4e72c1939cbe90293 | 57e148ea3ebc4a7476a661ce4332fdc15912934d | /cf 606 div 2 C.py | ee6cc6c8cbec456b68807eceb9a6cee0e076571c | [] | no_license | FahimSifnatul/online_problem_solving_with_FahimSifnatul_python_version | 20f99a59dda8083ac4cf220b0cd4b45b34262fa3 | 6e1e54b78ba5d64ba4bb5edee507277fe2c1a186 | refs/heads/master | 2022-12-24T10:57:06.212206 | 2020-10-07T05:29:54 | 2020-10-07T05:29:54 | 265,504,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from sys import stdin,stdout
ans,pos = [], 0
for i in range(int(input())):
s = stdin.readline()
cnt, j, l = 0, 0, len(s)-2
while j <= l-2:
tmp = s[j] + s[j+1] + s[j+2]
if j <= l-4 and tmp+s[j+3]+s[j+4] == 'twone':
ans.append(str(j+3))
ans.append(' ')
cnt += 1
j += 5
elif tmp in ['one','two']:
ans.append(str(j+2))
ans.append(' ')
cnt += 1
j += 3
else:
j += 1
ans.append('\n')
ans.insert(pos, str(cnt))
ans.insert(pos+1, '\n')
pos = len(ans)
stdout.write(''.join(ans))
| [
"noreply@github.com"
] | FahimSifnatul.noreply@github.com |
45de7de2ca65f57cb6a7a2c82425aa4a63f7f879 | 2127cabeeda296f7a6b692982872d91e8bdd3016 | /tests/test_schema_priority.py | 15535c1c3ad072aac439c0948434e442010679ea | [
"Apache-2.0"
] | permissive | nomilkinmyhome/dataclass_factory | 26059993af95509e386793c42fd743d6f08e1079 | 7bcbd395acd5c61806ae36042067a7f9882cec28 | refs/heads/master | 2022-11-18T21:51:40.308764 | 2020-03-26T08:51:08 | 2020-03-26T08:51:08 | 279,984,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,731 | py | from dataclasses import dataclass
from unittest import TestCase
from dataclass_factory import Factory, Schema
@dataclass
class Data:
a: str = ""
b: str = ""
c_: str = ""
_d: str = ""
class TestFactory(TestCase):
def test_only_mapping(self):
factory = Factory(
schemas={
Data: Schema(
only=("b",),
name_mapping={"a": "A"},
only_mapped=True,
),
}
)
data = Data("AA", "BB", "CC")
serial = {"b": "BB"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "b": "BB"}
data2 = Data(b="BB")
self.assertEqual(factory.load(serial, Data), data2)
def test_only_exclude(self):
factory = Factory(
schemas={
Data: Schema(
only=("a", "b",),
exclude=("a",)
),
}
)
data = Data("AA", "BB", "CC")
serial = {"b": "BB"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "b": "BB"}
data2 = Data(b="BB")
self.assertEqual(factory.load(serial, Data), data2)
def test_trailing_mapping(self):
factory = Factory(
schemas={
Data: Schema(
name_mapping={"c_": "c_"},
trim_trailing_underscore=True,
),
}
)
data = Data("AA", "BB", "CC")
serial = {"a": "AA", "b": "BB", "c_": "CC"}
self.assertEqual(factory.dump(data), serial)
self.assertEqual(factory.load(serial, Data), data)
def test_internal_only(self):
factory = Factory(
schemas={
Data: Schema(
only=("_d",),
skip_internal=True,
),
}
)
data = Data("AA", "BB", "CC", "DD")
serial = {"_d": "DD"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "_d": "DD"}
data2 = Data(_d="DD")
self.assertEqual(factory.load(serial, Data), data2)
def test_internal_mapping(self):
factory = Factory(
schemas={
Data: Schema(
name_mapping={"_d": "_d"},
skip_internal=True,
),
}
)
data = Data("AA", "BB", "CC", "DD")
serial = {"a": "AA", "b": "BB", "c": "CC", "_d": "DD"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "_d": "DD"}
data2 = Data(a="XXX", _d="DD")
self.assertEqual(factory.load(serial, Data), data2)
| [
"tishka17@mail.ru"
] | tishka17@mail.ru |
72fd3453bb6ac3f150bff9dbcaa458288016d216 | 8379cb63b570eb29c2d2e52b37960ea350fe6be3 | /datasets/create_bond_dataset.py | 953ce6f39ef64c0eccda9e81138d9b55caacd945 | [] | no_license | biotite-dev/biotite-util | 3ad622cee28a556ef37c2abf7dabee8f4ae91bfb | 3c5bcce9411c6f1bd5c12df91b1c091c5eff84ab | refs/heads/master | 2022-10-13T21:49:25.040376 | 2022-09-18T15:51:42 | 2022-09-18T15:51:42 | 152,449,276 | 1 | 1 | null | 2022-09-18T15:51:43 | 2018-10-10T15:46:49 | Python | UTF-8 | Python | false | false | 2,164 | py | import argparse
import msgpack
import biotite.structure as struc
import biotite.structure.io.pdbx as pdbx
BOND_ORDERS = {
("SING", "N") : struc.BondType.SINGLE,
("DOUB", "N") : struc.BondType.DOUBLE,
("TRIP", "N") : struc.BondType.TRIPLE,
("QUAD", "N") : struc.BondType.QUADRUPLE,
("SING", "Y") : struc.BondType.AROMATIC_SINGLE,
("DOUB", "Y") : struc.BondType.AROMATIC_DOUBLE,
("TRIP", "Y") : struc.BondType.AROMATIC_TRIPLE,
}
def create_bond_dict(components_pdbx_file_path, msgpack_file_path):
pdbx_file = pdbx.PDBxFile()
pdbx_file.read(components_pdbx_file_path)
components = pdbx_file.get_block_names()
bond_dict = {}
for i, component in enumerate(components):
print(f"{component:3} {int(i/len(components)*100):>3d}%", end="\r")
cif_bonds = pdbx_file.get_category(
"chem_comp_bond", block=component, expect_looped=True
)
if cif_bonds is None:
# No bond info for this compound
continue
else:
group_bonds = {}
for atom1, atom2, order, aromatic_flag in zip(
cif_bonds["atom_id_1"], cif_bonds["atom_id_2"],
cif_bonds["value_order"], cif_bonds["pdbx_aromatic_flag"]
):
bond_type = BOND_ORDERS[order, aromatic_flag]
group_bonds[(atom1, atom2)] = bond_type
bond_dict[component] = group_bonds
with open(msgpack_file_path, "wb") as msgpack_file:
msgpack.dump(bond_dict, msgpack_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create a dataset, that contains the information which "
"atoms are connected in a given residue. "
"The information is based on a 'components.cif' file."
)
parser.add_argument(
"infile",
help="The path to the 'components.cif' file to be read."
)
parser.add_argument(
"outfile",
help="The path to the file, where the output MessagePack file should "
"be placed."
)
args = parser.parse_args()
create_bond_dict(args.infile, args.outfile) | [
"patrick.kunzm@gmail.com"
] | patrick.kunzm@gmail.com |
83156090bb8b3b0a4cc1a33ac9e451f5d4f13a09 | 6a33cb94d4af1d8a7329ddc6c9d42f870c35bb2f | /python/100+/euler131.py | 4d34b9b3dbb1e9573a034fa5863f2228130191e4 | [] | no_license | vochong/project-euler | 836321cc8e7d2e7cdf22b3b136d44dcba74a8701 | 6a0c7103861ff825bf84800b6e2e62819a41e36d | refs/heads/master | 2020-04-29T10:41:48.487159 | 2018-09-19T00:13:34 | 2018-09-19T00:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from fractions import gcd
def isPrime(n):
    """Trial-division primality test.

    Returns True when n is prime.  Numbers below 2 (0, 1 and negatives)
    are reported as non-prime; the original version returned True for
    them because the loop body never ran.
    """
    if n < 2:
        return False
    for i in range(2, int(n**0.5)+1):
        if n % i == 0:
            return False
    return True
def euler131():
    """Count primes below one million of the form m**3 - (m-1)**3.

    (Project Euler 131: consecutive-cube differences that are prime.)
    """
    count = 0
    m = 2
    while True:
        candidate = m**3 - (m - 1)**3
        if candidate >= 1000000:
            break
        if isPrime(candidate):
            count += 1
        m += 1
    return count
if __name__ == "__main__":
print euler131() | [
"kueltz.anton@gmail.com"
] | kueltz.anton@gmail.com |
6bcbd7cb96b17a945779fd33d2772b582faa191c | 2ba8378d2028305c2582a2d5d16a91527d207040 | /Soilder.py | 1b6e9968b690cf80f1a63808b6568dd247d9e2ac | [] | no_license | giridhararao/guvi | 41bf15e7dbd8ca3494f2e7ada5b42737e80fefe8 | e67e245a2b31463f39087430bce0f7cf5bc92b4a | refs/heads/master | 2020-03-22T06:04:27.296180 | 2019-03-30T06:52:05 | 2019-03-30T06:52:05 | 139,610,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | def factors1(n) :
    # Body of factors1(n); its "def factors1(n) :" header sits on the
    # corrupted dataset row directly above this block.
    # Counts the prime factors of n with multiplicity, e.g. 12 -> 3 (2, 2, 3);
    # returns 0 for n <= 1.
    L = []  # NOTE(review): never used
    i = 2
    cnt = 0
    while n >1 :
        while n%i == 0 :
            cnt += 1
            n //= i
        i += 1
    return cnt
# Read T test cases of "n k"; for each case compute the product
# (k+1) * (k+2) * ... * n (i.e. n!/k!) and print the number of prime
# factors it has, counted with multiplicity (via factors1 above).
case_total = int(input())
cases = [input().split() for _ in range(case_total)]
for raw_n, raw_k in cases:
    upper, lower = int(raw_n), int(raw_k)
    product = 1
    for factor in range(lower + 1, upper + 1):
        product *= factor
    print(factors1(product))
| [
"noreply@github.com"
] | giridhararao.noreply@github.com |
ed430e4a8ec7376b2596b58d7285585e9507bec0 | 609a4bb18ffd8e93ef28da6762266d852c9aca54 | /src/h02_bert_embeddings/bert_per_word.py | 9343c3f703ec8633f3d6dd09f9e3c73e45675912 | [
"MIT"
] | permissive | tpimentelms/lexical-ambiguity-in-context | 0fe9a6835451bc2d5abcba65654e7049109ded67 | 5277b9e0f1a846b5fe93eeba1cf37de2d48cfc62 | refs/heads/main | 2023-05-31T03:25:34.499572 | 2021-06-02T15:04:37 | 2021-06-02T15:04:37 | 373,110,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | import torch
import torch.nn as nn
from transformers import BertModel
from utils import constants
class BertPerWordModel(nn.Module):
    """Wraps a pretrained BERT model and pools sub-word (BPE) hidden states
    back into one embedding per original word by averaging."""
    # pylint: disable=arguments-differ
    def __init__(self, bert_option):
        super().__init__()
        self.bert = self.get_bert(bert_option)
    @staticmethod
    def get_bert(bert_option):
        # `bert_option` is a pretrained model name/path for HuggingFace.
        model = BertModel.from_pretrained(bert_option)
        return model
    def forward(self, x, attention_mask, mappings):
        # `mappings[b, w]` is the number of BPE pieces word w spans in
        # sentence b — assumed from usage below; TODO confirm at call site.
        output, _ = self.bert(x, attention_mask=attention_mask)
        return self.from_bpe_to_word(output, mappings)
    def from_bpe_to_word(self, output, mappings):
        """Average each word's BPE hidden states into a single vector.

        Assumes output is (batch, seq_len, hidden) and mappings is
        (batch, n_words) of per-word BPE counts — confirm with caller.
        """
        batch_size = output.size(0)
        longest_token_sent = mappings.size(1)
        # Drop the first and last positions (presumably [CLS]/[SEP]).
        hidden_states = output[:, 1:-1]
        embedding_size = output.size(-1)
        hidden_states_per_token = torch.zeros(
            (batch_size, longest_token_sent, embedding_size)).to(device=constants.device)
        # Running offset of each sentence's next unconsumed BPE position.
        mask_start = torch.zeros(batch_size).long().to(device=constants.device)
        for mask_pos in range(0, longest_token_sent):
            mask_sizes = mappings[:, mask_pos]
            hidden_states_per_token[:, mask_pos] = \
                self.sum_bpe_embeddings(hidden_states, mask_start, mask_sizes)
            mask_start += mask_sizes
        return hidden_states_per_token
    @staticmethod
    def sum_bpe_embeddings(hidden_states, mask_start, mask_sizes):
        """Mean of each sentence's BPE vectors in [start, start + size)."""
        mask_idxs = []
        for i, (sent_start, sent_size) in enumerate(zip(mask_start, mask_sizes)):
            mask_idxs += [(i, sent_start.item() + x) for x in range(sent_size)]
        mask_idxs = list(zip(*mask_idxs))
        # Copy only the selected (sentence, position) entries; everything
        # else stays zero, so the sum over dim=1 covers just those pieces.
        hidden_states_temp = \
            torch.zeros_like(hidden_states).float().to(device=constants.device)
        hidden_states_temp[mask_idxs] = hidden_states[mask_idxs]
        embedding_size = hidden_states.size(-1)
        # Divide the per-sentence sums by the piece counts to get the mean.
        return hidden_states_temp.sum(dim=1) / \
            mask_sizes.unsqueeze(-1).repeat(1, embedding_size).float()
| [
"tiagopms@gmail.com"
] | tiagopms@gmail.com |
0479a264837198ef41a9305938f1a57efdcd97d3 | 75e8d0da60b0e9456058eee70ada47ed11e953a2 | /584A.py | db56edd5936777f92ff2e128d8c3d63cc31d75f5 | [] | no_license | smirnoffmg/codeforces | 87aa12596b4927e5b5620369a5d4fb52330c51f7 | 1b0e7feb051c7b7c5c4e46351e122a050d1561ac | refs/heads/master | 2021-10-11T10:09:24.019018 | 2019-01-24T13:54:35 | 2019-01-24T13:54:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | # -*- coding: utf-8 -*-
# Codeforces 584A (per filename): print any n-digit number divisible by t,
# or -1 if none exists.  Python 2 script (raw_input).
n, t = map(int, raw_input().split(' '))
if t == 10:
    # A multiple of 10 ends in 0; no 1-digit positive number qualifies.
    if n > 1:
        print(10 ** (n - 1))
    else:
        print(-1)
else:
    # Round 10**(n-1) up to the next multiple of t.  If it already is a
    # multiple (f == 0) this prints 10**(n-1) + t, still n digits for t < 10.
    f = 10 ** (n - 1) % t
    print(10 ** (n - 1) + (t-f))
| [
"smirnoffmg@gmail.com"
] | smirnoffmg@gmail.com |
a29184b368126daa37dbeb89a73cfc13478bb9a9 | c3dc08fe8319c9d71f10473d80b055ac8132530e | /challenge-173/roger-bell-west/python/ch-1.py | 3ee4659182a9b33fabe21f977355cec1a0bd03bc | [] | no_license | southpawgeek/perlweeklychallenge-club | d4b70d9d8e4314c4dfc4cf7a60ddf457bcaa7a1e | 63fb76188e132564e50feefd2d9d5b8491568948 | refs/heads/master | 2023-01-08T19:43:56.982828 | 2022-12-26T07:13:05 | 2022-12-26T07:13:05 | 241,471,631 | 1 | 0 | null | 2020-02-18T21:30:34 | 2020-02-18T21:30:33 | null | UTF-8 | Python | false | false | 809 | py | #! /usr/bin/python3
import unittest
def esthetic(n0, base):
    """Return True if n0 is "esthetic" in the given base, i.e. every pair
    of adjacent digits differs by exactly 1 (vacuously true for n0 <= 0)."""
    remaining = n0
    prev_digit = 0
    have_prev = False
    while remaining > 0:
        digit = remaining % base
        if have_prev and abs(digit - prev_digit) != 1:
            return False
        have_prev = True
        prev_digit = digit
        remaining //= base
    return True
def esthetic10(n):
    """Convenience wrapper: esthetic-number test in base 10."""
    return esthetic(n, 10)
class TestEsthetic(unittest.TestCase):
    """Examples from the challenge statement.  Note every assertion message
    reads 'example 1' regardless of the actual case number."""
    def test_ex1(self):
        self.assertEqual(esthetic10(5456),True,'example 1')
    def test_ex2(self):
        self.assertEqual(esthetic10(120),False,'example 1')
    def test_ex3(self):
        self.assertEqual(esthetic10(12),True,'example 1')
    def test_ex4(self):
        self.assertEqual(esthetic10(5654),True,'example 1')
    def test_ex5(self):
        self.assertEqual(esthetic10(890),False,'example 1')
# Run the test suite whenever the module is executed.
unittest.main()
| [
"roger@firedrake.org"
] | roger@firedrake.org |
d8f05b966238a7358ae208514b580ac1cdfb8039 | 0792f5f7432ef3320c16e717671726289d1db3be | /filetest.py | 7c1f8dad9c01c68c7b9df6e8a0cdd1053bf2b2b9 | [] | no_license | hujiangyi/autoupgrade | 47a28ee9751d555a11d7697105b17af1f2d6c13c | 1f6cae3a264d4ce639283bda10df97e216d1fa40 | refs/heads/master | 2020-04-13T04:53:07.185244 | 2019-05-17T06:20:06 | 2019-05-17T06:20:06 | 162,974,411 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 jay <hujiangyi@dvt.dvt.com>
#
# Open the command-list file and iterate its lines; the Python 2 print
# of each stripped line is on the following (metadata-fused) source line.
cmdfile = open('./collectDataCmd.txt', "r")
for line in cmdfile.readlines():
print line.strip() | [
"hujiangyi@dvt.dvt.com"
] | hujiangyi@dvt.dvt.com |
2888b0395bdc3f6679dfc6bde5a66fad4551756e | 339901caa0cbb3bd2762ad83bb9f847c01b0df39 | /rice_RILs_mPing_scripts/Construction_of_recombination_bin_and_linkage_map/scripts/genotype/Tab2SNP.py | ec1d59a1b3f5eef7fce9a54aabae2c80828dece8 | [] | no_license | stajichlab/Dynamic_rice_publications | e592e83a4842eff7354e06e5368e6f7590b472ee | 93ac8732d64b7ab4831a0b0b9b1593efc5814805 | refs/heads/master | 2020-03-24T05:10:56.815367 | 2020-02-11T07:26:17 | 2020-02-11T07:26:17 | 142,477,743 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | #!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import re
import os
import argparse
import glob
from Bio import SeqIO
import subprocess
import multiprocessing as mp
import gzip
def usage():
    """Print a short usage/help message (Python 2 print statement)."""
    test="name"  # NOTE(review): unused variable, kept as-is
    message='''
python Tab2SNP.py --input RILs_ALL_bam_correct
Convert genotype.tab to Maq.p1.map.pileup.SNP.
    '''
    print message
#/rhome/cjinfeng/HEG4_cjinfeng/RILs/QTL_pipe/bin/RILs_ALL_275line_core/NB.RILs.dbSNP.SNPs.Markers
#SNP_id Allele
#0100021547A A
def read_parents(infile):
    """Read a marker table (SNP_id<TAB>Allele) into a dict.

    Lines shorter than 3 characters and the 'SNP_id' header are skipped.
    Returns a defaultdict mapping SNP_id -> allele that yields an empty
    string for unknown markers (same contract as before; the pointless
    ``lambda : str()`` factory is simplified to ``str``).
    """
    data = defaultdict(str)
    with open(infile, 'r') as filehd:
        for line in filehd:
            line = line.rstrip()
            if len(line) > 2 and not line.startswith('SNP_id'):
                unit = re.split(r'\t', line)
                data[unit[0]] = unit[1]
    return data
##CHROM POS REF RIL103_0_GAGTGG_FC1213L5
#Chr1 31071 A A/A
##0100031071A GN278 G
def convert_tab2SNP(infile, markers, outfile):
    """Convert one gzipped genotype.tab file to Maq.p1.map.pileup.SNP format.

    infile  : gzipped tab file; the header's last column is like
              'RIL103_0_GAGTGG_FC1213L5', data rows are
              'Chr<N> <pos> <ref> <X/Y>'.
    markers : mapping whose keys are marker IDs built as
              '%02d%08d%s' % (chromosome, position, ref base).
    outfile : plain-text output, one 'marker<TAB>GN id<TAB>allele' row per
              homozygous, non-missing call at a known marker.

    Updated for Python 3 (the original was Python-2 only): text-mode gzip
    instead of bytes, ``pos in markers`` instead of ``.has_key()`` and
    ``ofile.write`` instead of the ``print >>`` statement.  Both files are
    now closed via ``with`` even on error.
    """
    with gzip.open(infile, 'rt') as filehd, open(outfile, 'w') as ofile:
        headers = re.split(r'\t', filehd.readline())
        # 'RIL103_...' in the last header column -> sample id 'GN103'.
        rils = re.split(r'_', headers[-1])
        ril = re.sub(r'RIL', r'GN', rils[0])
        for line in filehd:
            line = line.rstrip()
            if len(line) > 2 and not line.startswith('#'):
                unit = re.split(r'\t', line)
                chrs = re.sub(r'Chr', r'', unit[0])
                # Marker key: zero-padded chromosome + position + ref base.
                pos = '%02d%08d%s' % (int(chrs), int(unit[1]), unit[2])
                # Keep homozygous, non-missing genotypes only (e.g. 'A/A').
                if unit[3][0] == unit[3][2] and not unit[3][0] == '.':
                    if pos in markers:
                        ofile.write('%s\t%s\t%s\n' % (pos, ril, unit[3][0]))
def main():
    """Convert every *.genotype.tab.gz under --input to a
    .Maq.p1.map.pileup.SNP file, skipping ones whose output already exists
    (Python 2 script: note the print statement below)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input')
    parser.add_argument('-v', dest='verbose', action='store_true')
    args = parser.parse_args()
    # Marker table is expected in the current working directory.
    markers = read_parents('NB.RILs.dbSNP.SNPs.Markers')
    snp_files = glob.glob('%s/*.genotype.tab.gz' %(args.input))
    #convert_tab2SNP('RILs_ALL_bam_correct/GN87.genotype.tab', markers)
    for tab in sorted(snp_files):
        # NOTE(review): the dots in this pattern are unescaped regex
        # metacharacters; fine for these fixed suffixes but fragile.
        snp = re.sub(r'.genotype.tab.gz', r'.Maq.p1.map.pileup.SNP', tab)
        if not os.path.exists(snp):
            print '%s to %s' %(tab, snp)
            convert_tab2SNP(tab, markers, snp)
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"jinfeng7chen@gmail.com"
] | jinfeng7chen@gmail.com |
45689ce9ef1065b6e18e0fd34e1d78d680c0cb51 | 162eed4191aef4431f94a0db1ad4185b6daa6f67 | /supervised_learning/0x00-binary_classification/21-deep_neural_network.py | f28ab914fda9b74ef9fc8a9f068b6e0624e1b5b5 | [] | no_license | giovannyortegon/holbertonschool-machine_learning | d6897bfb492f9d266302930927416637be3c172d | 8cd5e0f837a5c0facbf73647dcc9c6a3b1b1b9e0 | refs/heads/master | 2022-12-31T15:34:20.489690 | 2020-10-24T03:37:01 | 2020-10-24T03:37:01 | 279,656,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,321 | py | #!/usr/bin/env python3
""" deep neural network """
import numpy as np
class DeepNeuralNetwork:
    """ DeepNeuralNetwork - defines a deep neural network
    """
    @staticmethod
    def he_et_al(nx, layers):
        """ The weights initialized using the He et al """
        if type(layers) is not list or len(layers) == 0:
            raise TypeError("layers must be a list of positive integers")
        weights = dict()
        for i in range(len(layers)):
            if type(layers[i]) is not int:
                raise TypeError("layers must be a list of positive integers")
            # Fan-in of layer i: previous layer size, or nx for the first.
            layer = layers[i - 1] if i > 0 else nx
            # He initialization: N(0, 1) weights scaled by sqrt(2 / fan_in).
            W1 = np.random.randn(layers[i], layer)
            W2 = np.sqrt(2 / layer)
            weights.update({'W' + str(i + 1): W1 * W2,
                            'b' + str(i + 1): np.zeros((layers[i], 1))
                            })
        return weights
    def __init__(self, nx, layers):
        """ DeepNeuralNetwork - public instance attributes
        Args:
            nx is the number of input features
            layers is a list representing the number of nodes
            L: The number of layers in the neural network.
            cache: A dictionary to hold all intermediary values of the network.
            weights: A dictionary to hold all weights
                     and biased of the network.
        """
        if type(nx) is not int:
            raise TypeError("nx must be an integer")
        elif nx < 1:
            raise ValueError("nx must be a positive integer")
        else:
            self.nx = nx
        if type(layers) is not list or len(layers) == 0:
            raise TypeError("layers must be a list of positive integers")
        else:
            self.layers = layers
        # Reject any layer size below 1 (he_et_al re-checks integer-ness).
        arrprob = np.array(self.layers)
        lenarr = arrprob[arrprob >= 1].shape[0]
        if len(self.layers) != lenarr:
            raise TypeError("layers must be a list of positive integers")
        self.__L = len(self.layers)
        self.__cache = {}
        self.__weights = self.he_et_al(nx, layers)
    @property
    def L(self):
        """ L - number of layers
        Args:
            __L number of layers
        Return:
            return __L Private instance
        """
        return self.__L
    @property
    def cache(self):
        """ cache - A dictionary to hold all intermediary valuess
        Args:
            __cache A dictionary to hold all intermediary values
        Return:
            Return __cache Private instance
        """
        return self.__cache
    @property
    def weights(self):
        """ weights - A dictionary to hold all weights and biased
        Args:
            __weights A dictionary to hold all weights and biased
        Return:
            Return __weights Private instance
        """
        return self.__weights
    def forward_prop(self, X):
        """ forward_prop - Calculates the forward propagation
            of the neural network.
        Args:
            X contains the input data.
        Return:
            Returns the output of the neural network and the cache
        """
        self.__cache["A0"] = X
        for i in range(self.__L):
            w = "W" + str(i + 1)
            b = "b" + str(i + 1)
            a = "A" + str(i + 1)
            z = np.matmul(self.__weights[w], self.__cache["A" + str(i)]) + \
                self.__weights[b]
            # Sigmoid activation for every layer.
            self.__cache[a] = 1 / (1 + np.exp(-z))
        return self.__cache[a], self.__cache
    def cost(self, Y, A):
        """ cost - Calculates the cost of the model using
            logistic regression
        Args:
            Y contains the correct labels for the input data
            A containing the activated output of the neuron
              for each example.
        Return:
            Returns the cost
        """
        m = Y.shape[1]
        logprobs1 = np.multiply(np.log(A), Y)
        # 1.0000001 guards against log(0) when A == 1.
        logprobs2 = np.multiply(np.log(1.0000001 - A), (1 - Y))
        cost = -(1 / m) * np.sum(logprobs1 + logprobs2)
        return cost
    def evaluate(self, X, Y):
        """ evaluate - Evaluates the neuron’s predictions
        Args:
            X - (nx, m) that contains the input data.
            Y - (1, m) contains the correct labels for the input data.
        Return
            Returns the neuron’s prediction and the cost of the network.
        """
        A, _ = self.forward_prop(X)
        cost = self.cost(Y, A)
        return np.where(A >= 0.5, 1, 0), cost
    def gradient_descent(self, Y, cache, alpha=0.05):
        """ gradient_descent - one backward pass that updates the
            network weights in place.
        Args:
            Y contains the correct labels for the input data
            cache is accepted but unused here; activations are read
                  from self.__cache instead (NOTE(review))
            alpha is the learning rate
        """
        m = Y.shape[1]
        # Snapshot so every update uses the pre-pass weights.
        weights = self.__weights.copy()
        for i in range(self.__L, 0, -1):
            W = weights.get('W' + str(i))
            W1 = weights.get('W' + str(i + 1))
            A = self.__cache['A' + str(i)]
            A1 = self.__cache['A' + str(i - 1)]
            b = weights['b' + str(i)]
            if i == self.__L:
                # Output layer: derivative of cross-entropy with sigmoid.
                dZ = A - Y
            else:
                # Hidden layers: back-propagate through the sigmoid.
                dZ = np.matmul(W1.T, dZ1) * (A * (1 - A))
            dW = (1 / m) * np.matmul(dZ, A1.T)
            db = (1 / m) * np.sum(dZ, axis=1, keepdims=True)
            dZ1 = dZ
            self.__weights['W' + str(i)] = W - (dW * alpha)
            self.__weights['b' + str(i)] = b - (db * alpha)
| [
"ortegon.giovanny@hotmail.com"
] | ortegon.giovanny@hotmail.com |
0ce1310744f3da08e0fa94833be91e0a0a8e6cbf | 0172fee2851e3d02b855a53d8b63b262d169e6a5 | /ptsites/sites/pttime.py | 75c6b76556862fd4b682da3f9345ccdd374cb347 | [
"MIT"
] | permissive | Tuohai-Li/flexget_qbittorrent_mod | 3f3f9df45680d27853f44bee6421ceb750d9d01e | 81e3bb473f82dce759da3795b2e89bfc7717d3bb | refs/heads/master | 2023-09-02T17:46:42.077469 | 2021-11-17T04:06:53 | 2021-11-17T04:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | from ..schema.nexusphp import AttendanceHR
from ..schema.site_base import Work, SignState
from ..utils.net_utils import NetUtils
class MainClass(AttendanceHR):
    """Site plugin for pttime.org (NexusPHP tracker with attendance sign-in)."""
    URL = 'https://www.pttime.org/'
    # Presumably [next-class, top-class] thresholds used by the base
    # class's promotion checks — confirm against AttendanceHR.
    USER_CLASSES = {
        'downloaded': [3221225472000, 16106127360000],
        'share_ratio': [3.05, 4.55],
        'days': [112, 364]
    }
    def build_workflow(self, entry, config):
        """One sign-in step: GET /attendance.php and match a success message.

        The regexes are the site's Chinese responses, roughly: "this is your
        Nth check-in, N consecutive days, gained N bonus points", "gained
        bonus points: N", and "you already signed in today".
        """
        return [
            Work(
                url='/attendance.php',
                method='get',
                succeed_regex=[
                    '这是你的第.*?次签到,已连续签到.*天,本次签到获得.*个魔力值。',
                    '获得魔力值:\\d+',
                    '你今天已经签到过了,请勿重复刷新。'],
                check_state=('final', SignState.SUCCEED),
                is_base_content=True
            )
        ]
    def build_selector(self):
        """Extend the base selectors with this site's detail-page elements."""
        selector = super(MainClass, self).build_selector()
        NetUtils.dict_merge(selector, {
            'detail_sources': {
                'default': {
                    'elements': {
                        'bar': '#info_block',
                        'table': '#outer table.main:last-child'
                    }
                }
            }
        })
        return selector
    def get_nexusphp_message(self, entry, config):
        """Fetch site messages using this site's unread-message selector."""
        super(MainClass, self).get_nexusphp_message(entry, config, unread_elements_selector='td > i[alt*="Unread"]')
| [
"12468675@qq.com"
] | 12468675@qq.com |
c82569cd70f74e9a2395eb42b18ea0e8e570ef28 | 0693cce8efbeca806f4551c22dce60d5f392c5c9 | /contentful_management/content_type_resource_proxy.py | ee4655778b24c6a08f53eb004c1fabac287e1758 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | timwis/contentful-management.py | 2dc4b7389ca2136ee2a12b89812b18ef2a347e67 | d71a0e18205d1de821b41c7225e8244e786be7f3 | refs/heads/master | 2021-06-28T12:04:58.130393 | 2017-08-10T16:30:09 | 2017-08-10T16:32:50 | 103,517,328 | 0 | 0 | null | 2017-09-14T10:04:48 | 2017-09-14T10:04:48 | null | UTF-8 | Python | false | false | 1,557 | py | """
contentful_management.content_type_resource_proxy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements the ContentTypeResourceProxy class.
:copyright: (c) 2017 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class ContentTypeResourceProxy(object):
    """Base class for content type related resource proxies."""
    def __init__(self, client, space_id, content_type_id):
        # Delegate all operations to the concrete proxy the subclass names.
        self.proxy = self._resource_proxy_class()(client, space_id, content_type_id)
    def __repr__(self):
        """Human-readable representation including space and content type IDs."""
        return "<{0} space_id='{1}' content_type_id='{2}'>".format(
            self.__class__.__name__,
            self.proxy.space_id,
            self.proxy.content_type_id
        )
    def _resource_proxy_class(self):
        """Abstract hook: subclasses return the proxy class to delegate to."""
        # NotImplementedError (a subclass of Exception, so backward
        # compatible) is the idiomatic signal for an abstract method.
        raise NotImplementedError("Must implement")
    def all(self, query=None):
        """
        Gets all resources related to the current content type.
        """
        return self.proxy.all(query)
    def find(self, resource_id, query=None):
        """
        Finds a single resource by ID related to the current content type.
        """
        return self.proxy.find(resource_id, query)
    def create(self, resource_id=None, attributes=None):
        """
        Creates a resource with a given ID (optional) and attributes for the current content type.
        """
        return self.proxy.create(resource_id=resource_id, attributes=attributes)
    def delete(self, resource_id):
        """
        Deletes a resource by ID.
        """
        return self.proxy.delete(resource_id)
| [
"david.litvakb@gmail.com"
] | david.litvakb@gmail.com |
1033b438cc562e7a9ed44ead113b464f37380f81 | 1e70fa970f3c0f7d71273b5aaf97e2dfdaf249ec | /axi/planner.py | 62a712bebc75653c3622814abd414afbfcadfb36 | [
"MIT"
] | permissive | sgentle/axi | fbbd423560078878f4fdcc2e5bb6354d04077634 | cc4551e990713aa28fd6a3e10634b825041f6e3e | refs/heads/master | 2021-04-06T15:32:58.904837 | 2018-03-08T11:30:37 | 2018-03-08T11:30:37 | 124,370,678 | 1 | 0 | null | 2018-03-08T09:45:20 | 2018-03-08T09:45:20 | null | UTF-8 | Python | false | false | 7,163 | py | from __future__ import division
from bisect import bisect
from collections import namedtuple
from math import sqrt, hypot
# a planner computes a motion profile for a list of (x, y) points
class Planner(object):
    """Computes motion profiles for lists of (x, y) points."""
    def __init__(self, acceleration, max_velocity, corner_factor):
        self.acceleration = acceleration
        self.max_velocity = max_velocity
        self.corner_factor = corner_factor
    def plan(self, points):
        """Return the Plan (motion profile) for one path."""
        return constant_acceleration_plan(
            points,
            self.acceleration,
            self.max_velocity,
            self.corner_factor,
        )
    def plan_all(self, paths):
        """Return one Plan per path."""
        return list(map(self.plan, paths))
# a plan is a motion profile generated by the planner
class Plan(object):
    """A motion profile: a sequence of constant-acceleration blocks."""
    def __init__(self, blocks):
        self.blocks = blocks
        self.ts = []  # start time of each block
        self.ss = []  # start distance of each block
        elapsed_time = 0
        elapsed_dist = 0
        for block in blocks:
            self.ts.append(elapsed_time)
            self.ss.append(elapsed_dist)
            elapsed_time += block.t
            elapsed_dist += block.s
        self.t = elapsed_time  # total time
        self.s = elapsed_dist  # total distance
    def instant(self, t):
        """Return the motion state at time t, clamped into [0, self.t]."""
        clamped = max(0, min(self.t, t))
        index = bisect(self.ts, clamped) - 1
        block = self.blocks[index]
        return block.instant(clamped - self.ts[index], self.ts[index], self.ss[index])
# a block is a constant acceleration for a duration of time
class Block(object):
    """A span of constant acceleration lasting `t` seconds from p1 to p2."""
    def __init__(self, a, t, vi, p1, p2):
        self.a = a    # acceleration
        self.t = t    # duration
        self.vi = vi  # entry velocity
        self.p1 = p1  # start point
        self.p2 = p2  # end point
        self.s = p1.distance(p2)  # block length
    def instant(self, t, dt=0, ds=0):
        """Kinematic state at local time t; dt/ds offset time and distance."""
        clamped = max(0, min(self.t, t))
        velocity = self.vi + self.a * clamped
        travelled = self.vi * clamped + self.a * clamped * clamped / 2
        travelled = max(0, min(self.s, travelled))
        position = self.p1.lerps(self.p2, travelled)
        return Instant(clamped + dt, position, travelled + ds, velocity, self.a)
# an instant gives position, velocity, etc. at a single point in time
Instant = namedtuple('Instant', ['t', 'p', 's', 'v', 'a'])
# Kinematics cheat sheet used throughout the planner:
# a = acceleration
# v = velocity
# s = distance
# t = time
# i = initial
# f = final
# vf = vi + a * t
# s = (vf + vi) / 2 * t
# s = vi * t + a * t * t / 2
# vf * vf = vi * vi + 2 * a * s
# Tolerance for floating-point comparisons in the planner.
EPS = 1e-9
_Point = namedtuple('Point', ['x', 'y'])
class Point(_Point):
    """An immutable 2D point/vector with basic vector arithmetic."""
    def length(self):
        """Euclidean magnitude."""
        return hypot(self.x, self.y)
    def normalize(self):
        """Unit vector in the same direction; the zero vector maps to itself."""
        magnitude = self.length()
        if magnitude == 0:
            return Point(0, 0)
        return Point(self.x / magnitude, self.y / magnitude)
    def distance(self, other):
        """Euclidean distance to `other`."""
        return hypot(self.x - other.x, self.y - other.y)
    def add(self, other):
        """Component-wise sum."""
        return Point(self.x + other.x, self.y + other.y)
    def sub(self, other):
        """Component-wise difference."""
        return Point(self.x - other.x, self.y - other.y)
    def mul(self, factor):
        """Scalar multiple."""
        return Point(self.x * factor, self.y * factor)
    def dot(self, other):
        """Dot product."""
        return self.x * other.x + self.y * other.y
    def lerps(self, other, s):
        """Point at distance `s` from self along the direction toward `other`."""
        direction = other.sub(self).normalize()
        return self.add(direction.mul(s))
Triangle = namedtuple('Triangle',
    ['s1', 's2', 't1', 't2', 'vmax', 'p1', 'p2', 'p3'])
def triangle(s, vi, vf, a, p1, p3):
    """Triangular velocity profile: accelerate to a peak, then decelerate.

    s: total distance, vi/vf: entry/exit velocities, a: acceleration,
    p1/p3: start/end points.  p2 in the result is the apex point.
    """
    accel_dist = (2 * a * s + vf * vf - vi * vi) / (4 * a)
    decel_dist = s - accel_dist
    peak_velocity = (vi * vi + 2 * a * accel_dist) ** 0.5
    accel_time = (peak_velocity - vi) / a
    decel_time = (vf - peak_velocity) / -a
    apex = p1.lerps(p3, accel_dist)
    return Triangle(
        accel_dist, decel_dist, accel_time, decel_time,
        peak_velocity, p1, apex, p3)
Trapezoid = namedtuple('Trapezoid',
    ['s1', 's2', 's3', 't1', 't2', 't3', 'p1', 'p2', 'p3', 'p4'])
def trapezoid(s, vi, vmax, vf, a, p1, p4):
    """Trapezoidal velocity profile: accelerate, cruise at vmax, decelerate."""
    accel_time = (vmax - vi) / a
    accel_dist = (vmax + vi) / 2 * accel_time
    decel_time = (vf - vmax) / -a
    decel_dist = (vf + vmax) / 2 * decel_time
    cruise_dist = s - accel_dist - decel_dist
    cruise_time = cruise_dist / vmax
    cruise_start = p1.lerps(p4, accel_dist)
    cruise_end = p1.lerps(p4, s - decel_dist)
    return Trapezoid(
        accel_dist, cruise_dist, decel_dist,
        accel_time, cruise_time, decel_time,
        p1, cruise_start, cruise_end, p4)
def corner_velocity(s1, s2, vmax, a, delta):
    """Maximum velocity allowed at the corner joining segments s1 and s2.

    Based on the GRBL cornering algorithm:
    https://onehossshay.wordpress.com/2011/09/24/improving_grbl_cornering_algorithm/
    """
    cosine = -s1.vector.dot(s2.vector)
    if abs(cosine - 1) < EPS:
        # Full reversal: must come to a stop at the corner.
        return 0
    sine = sqrt((1 - cosine) / 2)
    if abs(sine - 1) < EPS:
        # Straight through: no slowdown required.
        return vmax
    velocity = sqrt((a * delta * sine) / (1 - sine))
    return min(velocity, vmax)
class Segment(object):
    """A straight line between two points; the planner later splits it
    into constant-acceleration blocks."""
    def __init__(self, p1, p2):
        self.p1, self.p2 = p1, p2
        self.length = p1.distance(p2)
        self.vector = p2.sub(p1).normalize()  # unit direction p1 -> p2
        # Filled in by the planner's forward/backward passes.
        self.max_entry_velocity = 0
        self.entry_velocity = 0
        self.blocks = []
def constant_acceleration_plan(points, a, vmax, cf):
    """Build a Plan for the polyline `points` under constant acceleration.

    a: acceleration, vmax: velocity cap, cf: corner factor (the `delta`
    passed to corner_velocity).  Processes segments left to right, picking
    a triangular or trapezoidal profile per segment and backtracking
    (i -= 1) whenever a segment's entry velocity turns out too high to
    brake to the next corner's limit.
    """
    # make sure points are Point objects
    points = [Point(x, y) for x, y in points]
    # create segments for each consecutive pair of points
    segments = [Segment(p1, p2) for p1, p2 in zip(points, points[1:])]
    # compute a max_entry_velocity for each segment
    # based on the angle formed by the two segments at the vertex
    for s1, s2 in zip(segments, segments[1:]):
        v = corner_velocity(s1, s2, vmax, a, cf)
        s2.max_entry_velocity = v
    # add a dummy segment at the end to force a final velocity of zero
    segments.append(Segment(points[-1], points[-1]))
    # loop over segments
    i = 0
    while i < len(segments) - 1:
        # pull out some variables
        segment = segments[i]
        next_segment = segments[i + 1]
        s = segment.length
        vi = segment.entry_velocity
        vexit = next_segment.max_entry_velocity
        p1 = segment.p1
        p2 = segment.p2
        # determine which profile to use for this segment
        m = triangle(s, vi, vexit, a, p1, p2)
        if m.s1 < -EPS:
            # too fast! update max_entry_velocity and backtrack
            segment.max_entry_velocity = sqrt(vexit * vexit + 2 * a * s)
            i -= 1
        elif m.s2 < 0:
            # accelerate
            vf = sqrt(vi * vi + 2 * a * s)
            t = (vf - vi) / a
            segment.blocks = [
                Block(a, t, vi, p1, p2),
            ]
            next_segment.entry_velocity = vf
            i += 1
        elif m.vmax > vmax:
            # accelerate, cruise, decelerate
            z = trapezoid(s, vi, vmax, vexit, a, p1, p2)
            segment.blocks = [
                Block(a, z.t1, vi, z.p1, z.p2),
                Block(0, z.t2, vmax, z.p2, z.p3),
                Block(-a, z.t3, vmax, z.p3, z.p4),
            ]
            next_segment.entry_velocity = vexit
            i += 1
        else:
            # accelerate, decelerate
            segment.blocks = [
                Block(a, m.t1, vi, m.p1, m.p2),
                Block(-a, m.t2, m.vmax, m.p2, m.p3),
            ]
            next_segment.entry_velocity = vexit
            i += 1
    # concatenate all of the blocks
    blocks = []
    for segment in segments:
        blocks.extend(segment.blocks)
    # filter out zero-duration blocks and return
    blocks = [b for b in blocks if b.t > EPS]
    return Plan(blocks)
| [
"fogleman@gmail.com"
] | fogleman@gmail.com |
be0ace15d0a17f5c8094cd0c651183e8263917e7 | c6cbc2adf3acd58c8892874d0172b9844129595f | /web_flask/3-python_route.py | a85c50ff586ccdb64ea212a5e68027cc3b9e586f | [] | no_license | Joldiazch/AirBnB_clone_v2 | 4dac319e03f94ba677e5a3e17801958223c78552 | a9c2d54991e2e956fe27c89ece0ecc3400b045c1 | refs/heads/master | 2021-05-25T21:12:54.056693 | 2020-04-22T23:52:46 | 2020-04-22T23:52:46 | 253,921,574 | 1 | 0 | null | 2020-04-07T21:59:16 | 2020-04-07T21:59:16 | null | UTF-8 | Python | false | false | 824 | py | #!/usr/bin/python3
""" that starts a Flask web application """
# import flask
from flask import Flask
app = Flask(__name__)
# strict_slashes allow that this route work with /my_route and /my_route/
@app.route('/', strict_slashes=False)
def root():
    """Serve the index route with a fixed greeting."""
    greeting = 'Hello HBNB!'
    return greeting
@app.route('/hbnb', strict_slashes=False)
def hbnb():
    """Serve the /hbnb route."""
    body = 'HBNB'
    return body
@app.route('/c/<text>', strict_slashes=False)
def show_text(text):
    """Serve /c/<text>: show 'C ' plus the text, underscores as spaces."""
    return 'C {}'.format(text.replace('_', ' '))
@app.route('/python', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def show_python_text(text='is cool'):
    """Serve /python[/<text>]: 'Python ' plus text (default 'is cool'),
    with underscores displayed as spaces."""
    return 'Python {}'.format(text.replace('_', ' '))
if __name__ == "__main__":
    # Development server: all interfaces, port 5000, debug mode on.
    app.run(debug=True, host='0.0.0.0', port=5000)
| [
"jluis.diaz@udea.edu.co"
] | jluis.diaz@udea.edu.co |
4f824041f4d0d7bd13c488ef541522624434f08a | 90bc7032cda25da6541a976f37e5b9f491b70bd0 | /nrlbio/chimirna/lfc2cdf.py | 9c5e2d701dd00f6abf6a59d897007688e137812b | [] | no_license | afilipch/nrlbio | da89bb262e9b900b5b71cf14612ace9630263e61 | d05258e4b1e352130999a23e4d2b9717b8f834c9 | refs/heads/master | 2020-04-12T01:21:19.231931 | 2017-03-17T16:16:08 | 2017-03-17T16:16:08 | 52,795,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | #! /usr/bin/python
'''produces table of LFC CDF for tables of genes'''
import sys;
import copy;
import os;
import argparse;
from collections import *;
import itertools
from scipy.stats import ks_2samp, mannwhitneyu
# CLI: one or more gene-name tables, a gene->LFC table, and an output path.
parser = argparse.ArgumentParser(description='Script outputs presence of certain binding modes in the interactions');
parser.add_argument('path', metavar = 'N', nargs = '+', type = str, help = "path to gene names tables");
parser.add_argument('--lfc', nargs = '?', type = str, help = "path to the table which connects gene names to LFC")
parser.add_argument('-o', '--output', nargs = '?', type = str, help = "name of the output")
args = parser.parse_args();
def get_genes(path):
	'''Read a gene-name table (one name per line) and return the stripped names as a set.'''
	# `with` guarantees the handle is closed even if reading fails, which
	# the original open()/close() pair did not.
	with open(path) as table:
		return set(line.strip() for line in table)
# Collect the gene sets from every input table.
gene_list = [];
for path in args.path:
	gene_list.append(get_genes(path));
# Build the gene -> log-fold-change map; non-numeric rows are skipped.
lfc = {};
f = open(args.lfc);
for l in f:
	a = l.strip().split("\t");
	try:
		lfc[a[0]] = float(a[1]);
	except:
		pass;
f.close();
# Column 0 holds the LFCs of all genes; one extra column per gene set.
lfc_list =[lfc.values()];
for genes in gene_list:
	tlfc = [];
	for g in genes:
		try:
			tlfc.append(lfc[g]);
		except:
			pass;
		#print sys.stderr.write("%s\n" % g);
	lfc_list.append(copy.copy(tlfc));
##output
# Write the columns side by side; short columns are padded with a blank.
# NOTE(review): rows iterate range(len(lfc)) — presumably always >= the
# longest subset column; confirm.
o = open(args.output, 'w')
for i in range(len(lfc)):
	a = []
	for el in lfc_list:
		try:
			v = str(el[i])
		except:
			v = " "
		a.append(v);
	o.write("\t".join(a) + "\n")
o.close()
for i in range(len(lfc_list)):
	print ('set n%d\tlength %d' % (i, len(lfc_list[i])))
#>>> output p-values:
# Pairwise Kolmogorov-Smirnov tests between all column pairs
# (Python 2 print statements below).
for k1, k2 in itertools.combinations(list(range(len(lfc_list))), 2):
	print "differnce between gene set %d and gene set %d" % (k1+1,k2+1)
	print sys.stderr.write("KS statistics \t%1.3f\tp value\t%.2e\n" % ks_2samp(lfc_list[k1], lfc_list[k2]));
	print
	#print sys.stderr.write("KS statistics \t%1.3f\tp value\t%.2e\n" % ks_2samp(lfc_list[1] + lfc_list[2], lfc_list[0] ));
| [
"afilipch@a58d7b47-41b6-4d21-bc94-2c2c30e24b6a"
] | afilipch@a58d7b47-41b6-4d21-bc94-2c2c30e24b6a |
9f38fdd55389eccffedf6e73cd9bedac9c27be08 | 9120120ee0b52f24c627759b0901afbc55347529 | /pygis_src/ch06_spatialite/sec3_access_sqlite_via_python/test_3_import_shapefile_x_x.py | 959744afe35441f3c6f3152ce4654d40af5fc49d | [] | no_license | xiaomozi/book_python_gis | d999b17833abe746a7be2683595f48b54071cd59 | 754fa10c17a20506146d8f409e035e4d4869ad3e | refs/heads/master | 2020-03-11T18:36:40.739267 | 2018-01-04T16:31:23 | 2018-01-04T16:31:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
import os; import sqlite3 as sqlite
# Recreate the SpatiaLite database from scratch on every run.
dbfile = 'xx_shapedb.sqlite'
if os.path.exists(dbfile): os.remove(dbfile)
db = sqlite.connect(dbfile)
# Load the SpatiaLite extension so spatial SQL functions become available.
db.enable_load_extension(True)
db.execute('SELECT load_extension("mod_spatialite.so.7")')
cursor = db.cursor()
cursor.execute('SELECT InitSpatialMetaData();')
cursor.execute("DROP TABLE IF EXISTS gshhs")
cursor.execute("CREATE TABLE gshhs (" +
               "id INTEGER PRIMARY KEY AUTOINCREMENT, " +
               "level INTEGER)")
cursor.execute("CREATE INDEX gshhs_level on gshhs(level)")
# 2D polygon geometry column in EPSG:4326 (WGS84), plus a spatial index.
cursor.execute("SELECT AddGeometryColumn('gshhs', 'geom', " +
               "4326, 'POLYGON', 2)")
cursor.execute("SELECT CreateSpatialIndex('gshhs', 'geom')")
db.commit()
# NOTE(review): the WKT is formatted straight into the SQL string; fine
# for this trusted local shapefile, but parameters would be safer.
sql_tpl = "INSERT INTO gshhs (level, geom) VALUES (2, GeomFromText('{0}', 4326))"
import ogr
fName = '/gdata/GSHHS_l/GSHHS_l_L2.shp'
shapefile = ogr.Open(fName)
layer = shapefile.GetLayer(0)
# Copy every feature of the shapefile into the gshhs table as WKT.
for i in range(layer.GetFeatureCount()):
    feature = layer.GetFeature(i)
    geometry = feature.GetGeometryRef()
    wkt = geometry.ExportToWkt()
    cursor.execute( sql_tpl.format(wkt))
db.commit()
| [
"bukun@osgeo.cn"
] | bukun@osgeo.cn |
9de3642c57a0d6237c06147471395720fc4207a4 | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/gui/scaleform/daapi/view/meta/moduleinfometa.py | d7d29dd3d8f30586b5f883e94565df3dd822e3ea | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 967 | py | # 2015.11.10 21:27:57 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ModuleInfoMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class ModuleInfoMeta(AbstractWindowView):
    """Decompiled World of Tanks GUI meta class for the module-info window.

    The on* handlers appear intended for overriding (they report an
    override error when hit); the as_* methods forward data to Flash.
    """
    def onCancelClick(self):
        self._printOverrideError('onCancelClick')
    def onActionButtonClick(self):
        self._printOverrideError('onActionButtonClick')
    def as_setModuleInfoS(self, moduleInfo):
        # Forward only once the Flash (DAAPI) side is initialised.
        if self._isDAAPIInited():
            return self.flashObject.as_setModuleInfo(moduleInfo)
    def as_setActionButtonS(self, data):
        if self._isDAAPIInited():
            return self.flashObject.as_setActionButton(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\moduleinfometa.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:27:57 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
5bac41f8f6ba96763a05d3c22ca3bc5063f102a4 | fff80d8049aa19dacc01e48a21032fa74f069441 | /Chapter_15_Generating_Data/mpl_squares_correct.py | 6eb1554d6bb2d9a12e7bcb4d1c83c1173e396f52 | [
"MIT"
] | permissive | charliealpha094/Project_Data_Visualization | a77d5e8290de0fa416394e188e349bf198499ff1 | ccd55db58927dbbcfd57ab750fe7b21754c2b2dc | refs/heads/master | 2022-11-29T13:24:52.725995 | 2020-08-09T09:20:02 | 2020-08-09T09:20:02 | 285,671,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #Done by Carlos Amaral (19/07/2020)
#Styling and correcting
import matplotlib.pyplot as plt
# Plot y = x**2 for x in 1..5 using the seaborn style.
plt.style.use('seaborn')
input_values = [1, 2, 3, 4, 5]
squares = [x ** 2 for x in input_values]
fig, ax = plt.subplots()
ax.plot(input_values, squares, linewidth=3)
# Chart title and axis labels.
ax.set_title("Square Numbers", fontsize=24)
ax.set_xlabel("Value", fontsize=14)
ax.set_ylabel("Square of Value", fontsize=14)
# Tick-label size on both axes.
ax.tick_params(axis='both', labelsize=14)
plt.show() | [
"carlosamaral94@gmail.com"
] | carlosamaral94@gmail.com |
335642bb7e305c8a7e2f0448c2a1ec8d75c1a15b | 60dd6073a3284e24092620e430fd05be3157f48e | /tiago_public_ws/build/pal_gripper/pal_gripper_controller_configuration/catkin_generated/pkg.installspace.context.pc.py | 72d62140668fd30c806c4d71575a513fdfde1439 | [] | no_license | SakshayMahna/Programming-Robots-with-ROS | e94d4ec5973f76d49c81406f0de43795bb673c1e | 203d97463d07722fbe73bdc007d930b2ae3905f1 | refs/heads/master | 2020-07-11T07:28:00.547774 | 2019-10-19T08:05:26 | 2019-10-19T08:05:26 | 204,474,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context values — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_gripper_controller_configuration"
PROJECT_SPACE_DIR = "/media/root/BuntuDrive/Programming-Robots-with-ROS/tiago_public_ws/install"
PROJECT_VERSION = "1.0.2"
| [
"sakshum19@gmail.com"
] | sakshum19@gmail.com |
999b8d39e8434c5a1188b45fd7c163f18721d4d2 | db3662b35d48cc2b34b48350db603bb08fd47e81 | /tf_api/arithmetic_experiment.py | eb4d6e474336695b88f01debfe787147f2456e1b | [] | no_license | JuneXia/handml | 70b4c172940e0e10b2775ec0dad462f2b08f47bf | 35c23568065178e48347b440851ad5a38db5f93e | refs/heads/master | 2020-04-02T01:59:23.073289 | 2020-02-11T02:57:16 | 2020-02-11T02:57:16 | 153,885,392 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | # 各种算术运算实验
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Experiment 1: tf.scatter_sub.  The guard compares against '__main__1',
# which never matches, so this block is intentionally disabled.
if __name__ == '__main__1':
    # ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8],dtype = tf.int32)
    # indices = tf.constant([4, 3, 1, 7],dtype = tf.int32)
    # updates = tf.constant([9, 10, 11, 12],dtype = tf.int32)
    ref = tf.Variable([1, 2, 3, 4], dtype=tf.int32)
    indices = tf.constant([1, 3], dtype=tf.int32)
    updates = tf.constant([1, 3], dtype=tf.int32)
    # Subtract `updates` from `ref` at the given indices (TF1 API).
    sub = tf.scatter_sub(ref, indices, updates)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        arr = sess.run(sub)
        print(arr)
# Experiment 2: compare staircase vs. smooth exponential learning-rate decay
# and plot both curves (TensorFlow 1.x graph/session API).
if __name__ == '__main__':
    learning_rate = 0.1
    decay_rate = 0.6
    global_steps = 1000
    decay_steps = 100
    # global_ = tf.Variable(tf.constant(0))
    global_ = tf.Variable(0)
    # staircase=True decays in discrete steps every `decay_steps`.
    c = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=True)
    # staircase=False decays continuously.
    d = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=False)
    T_C = []  # staircase decay values per step
    F_D = []  # smooth decay values per step
    with tf.Session() as sess:
        for i in range(global_steps):
            T_c = sess.run(c, feed_dict={global_: i})
            T_C.append(T_c)
            F_d = sess.run(d, feed_dict={global_: i})
            F_D.append(F_d)
    plt.figure(1)
    plt.plot(range(global_steps), F_D, 'r-')
    plt.plot(range(global_steps), T_C, 'b-')
    plt.show()
| [
"junxstudio@sina.com"
] | junxstudio@sina.com |
1cec5040d91a46de4c6181b2f40f9673101e9b6d | f8d2521a88e465eed01adc3981c7a173d5c2554b | /etc/educational/round0001-0025/round0003/a1.py | 852c5741db4bf0680c354c4cf3bf35dd431c8deb | [] | no_license | clarinet758/codeforces | b2a8a349bba40e7761a8ce50dd5ff9a57477b60d | d79870c47bdb109547891a0d076dd173d6d647cf | refs/heads/main | 2021-12-15T05:46:51.000160 | 2021-12-01T12:01:33 | 2021-12-01T12:01:33 | 41,968,658 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
import sys
import io
import re
import math
import itertools
import collections
import bisect
#sys.stdin=file('input.txt')
#sys.stdout=file('output.txt','w')
#10**9+7
mod=1000000007  # common contest modulus, 10**9 + 7
#mod=1777777777
pi=3.141592653589  # low-precision pi literal (math.pi would be more precise)
IS=float('inf')  # "infinity" sentinel
xy=[(1,0),(-1,0),(0,1),(0,-1)]  # 4-neighbour grid offsets
bs=[(-1,-1),(-1,1),(1,1),(1,-1)]  # diagonal grid offsets
def niten(a, b):
    """Return the distance between points a and b on the number line.

    Cases mirror the original sign analysis; in every branch the result
    equals |a - b|.
    """
    if a >= 0 and b >= 0:
        return abs(a - b)
    if a >= 0:          # a >= 0, b < 0
        return a + abs(b)
    if b >= 0:          # a < 0, b >= 0
        return abs(a) + b
    return abs(abs(a) - abs(b))  # both negative
def gcd(a, b):
    """Greatest common divisor of a and b (iterative Euclidean algorithm)."""
    while b != 0:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Return the least common multiple of a and b.

    Uses floor division (//) so the result stays an integer: the original
    true division (/) is identical for ints on Python 2 but returns a
    float on Python 3.
    """
    return a * b // gcd(a, b)
def euclid_dis(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return (dx ** 2 + dy ** 2) ** 0.5
def choco(xa, ya, xb, yb, xc, yc, xd, yd):
    """Return 1 when segments AB and CD are perpendicular, else 0.

    Computes the dot product of the two direction vectors and tests it
    against a 1e-10 tolerance.
    """
    dot = (yb - ya) * (yd - yc) + (xb - xa) * (xd - xc)
    return 1 if abs(dot) < 1.e-10 else 0
# Main logic (Python 2: raw_input and the print statement).
# Greedily pick the largest values until their sum reaches m, then print
# how many were needed.
n=int(raw_input())  # number of available values
m=int(raw_input())  # required total
a=[]
for i in range(n):
    a.append(int(raw_input()))
a.sort()
ans=chk=0  # NOTE(review): chk is assigned but never used
for i,j in enumerate(a[::-1]):  # iterate from largest to smallest
    ans+=j
    if ans>=m:
        print i+1  # count of values taken (1-based)
        break
exit()
# Everything below is dead code: exit() above always runs first.
n,k=map(int,raw_input().split())
l=map(int,raw_input().split())
#end = time.clock()
#print end - start
| [
"clarinet758@gmail.com"
] | clarinet758@gmail.com |
365149b42675dd45c83444b739330f8d68f7586e | d6de6311ab2794cd3cce36ae0d1e591330941e8a | /2019/day09/part1_and_2.py | 623945fb25630ccddae81196e6974a98fb25a7e6 | [] | no_license | candyer/Advent-of-Code | a5346cffb4b9b1f45691c7f1b8d45bfd480b3fc0 | d5c6758c5b1feb66c4afb8ee773085a3751d8b37 | refs/heads/master | 2022-03-27T23:07:42.712877 | 2019-12-12T23:11:54 | 2019-12-12T23:11:54 | 112,863,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | import sys
from typing import List, Tuple, Dict
from collections import defaultdict
def breakDown(num: int) -> Tuple[int, List[int]]:
    """Split an Intcode instruction into (opcode, parameter modes).

    The two low decimal digits form the opcode; the remaining digits,
    read right-to-left, are the per-parameter modes, zero-padded so that
    exactly three modes are always returned.
    """
    opcode = num % 100
    mode_digits = str(num)[:-2]
    modes = [int(digit) for digit in reversed(mode_digits)]
    while len(modes) < 3:
        modes.append(0)
    return (opcode, modes)
def solve(d: Dict[int, int], inpt: int) -> int:
    """Run the Intcode program in `d` (address -> value) with the single
    input value `inpt`, returning the last value the program outputs.

    Supports position (0), immediate (1) and relative (2) parameter
    modes plus opcode 9 (adjust relative base), i.e. Advent of Code 2019
    day 9.  `d` must be a defaultdict(int) so that reads past the loaded
    program yield 0.  NOTE: the program memory `d` is mutated in place.
    """
    relative_base = 0
    output = 0
    i = 0  # instruction pointer
    while True:
        opcode, modes = breakDown(d[i])
        if opcode == 99:  # halt: return the last output produced
            return output
        # Resolve the effective addresses of the (up to three) parameters.
        # In immediate mode the "address" is the parameter cell itself.
        # Write targets (c) are never immediate, so only position/relative
        # modes apply there.  Addresses are computed eagerly even for
        # shorter instructions; that is safe because d is a defaultdict.
        a = i + 1 if modes[0] == 1 else d[i + 1] if modes[0] == 0 else d[i + 1] + relative_base
        b = i + 2 if modes[1] == 1 else d[i + 2] if modes[1] == 0 else d[i + 2] + relative_base
        c = d[i + 3] if modes[2] == 0 else d[i + 3] + relative_base
        ############################################################
        if opcode == 1:  # add
            d[c] = d[a] + d[b]
            i += 4
        elif opcode == 2:  # multiply
            d[c] = d[a] * d[b]
            i += 4
        elif opcode == 3:  # read input
            d[a] = inpt
            i += 2
        elif opcode == 4:  # write output (remember the latest value)
            i += 2
            output = d[a]
        elif opcode == 5:  # jump-if-true
            if d[a]:
                i = d[b]
            else:
                i += 3
        elif opcode == 6:  # jump-if-false
            if d[a]:
                i += 3
            else:
                i = d[b]
        elif opcode == 7:  # less-than: store 1/0
            if d[a] < d[b]:
                d[c] = 1
            else:
                d[c] = 0
            i += 4
        elif opcode == 8:  # equals: store 1/0
            if d[a] == d[b]:
                d[c] = 1
            else:
                d[c] = 0
            i += 4
        elif opcode == 9:  # adjust the relative base
            relative_base += d[a]
            i += 2
if __name__ == '__main__':
    # Each stdin line is one comma-separated Intcode program.
    for line in sys.stdin:
        d = defaultdict(int)  # program memory: address -> value, default 0
        i = 0
        for num in map(int, line.split(',')):
            d[i] = num
            i += 1
        # Run each part on its own copy of the program: solve() mutates
        # the memory dict in place, so reusing `d` for part 2 would
        # execute a program already modified by part 1.
        print('part1 result: {}'.format(solve(d.copy(), 1)))
        print('part2 result: {}'.format(solve(d.copy(), 2)))
| [
"candyer@users.noreply.github.com"
] | candyer@users.noreply.github.com |
f8532b1c94622cc29700d629fee455cb052c8cc0 | 1aaaca67031d81eabb07e9e9fb1a4fcae9de7462 | /dictionary.py | 090cb29f273b4decfd6bba42bd4c69c4ede70104 | [] | no_license | supriyo-pal/Joy-Of-Computing-Using-Python-All-programms | 9e08bdf4c2a88cc360c0cb296b217230f0ae0b2c | bd450dfdbc879e0b200d03fa9106ece09456fa8c | refs/heads/main | 2023-01-21T07:28:02.312421 | 2020-12-02T09:18:08 | 2020-12-02T09:18:08 | 317,161,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 10:24:41 2020
@author: Supriyo
"""
# A dict literal uses braces and maps keys to values: {key: value}.
# conv_factor maps a currency name to its conversion rate.
conv_factor = {'dollar': 60, 'euro': 80}
print(conv_factor)
print(conv_factor['dollar'])
print(conv_factor["euro"]) | [
"noreply@github.com"
] | supriyo-pal.noreply@github.com |
5c77f798c033cb12c4cf21c9e04ca72aa05a8927 | ead94ab55e0dc5ff04964a23b16cc02ab3622188 | /src/commands/pendingsubscr.py | e90bca3836fe62c655cb330efdf290ad4acc1fdf | [] | no_license | caifti/openstack-security-integrations | 88d92d6fcfb2a5a438f3a7e98f2e738d7434476f | bddd51675fe0ad4123f23520f3fdc6a793bf7bbc | refs/heads/master | 2023-03-11T02:49:38.442233 | 2020-12-16T07:39:12 | 2020-12-16T07:39:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,475 | py | # Copyright (c) 2014 INFN - "Istituto Nazionale di Fisica Nucleare" - Italy
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.db import transaction
from django.conf import settings
from django.core.management.base import CommandError
from openstack_auth_shib.models import PrjRequest
from openstack_auth_shib.models import RegRequest
from openstack_auth_shib.models import EMail
from openstack_auth_shib.models import PrjRole
from openstack_auth_shib.models import PSTATUS_PENDING
from openstack_auth_shib.notifications import notifyUser
from openstack_auth_shib.notifications import SUBSCR_REMINDER
from horizon.management.commands.cronscript_utils import CloudVenetoCommand
LOG = logging.getLogger("pendingsubscr")
class Command(CloudVenetoCommand):
    """Cron-style management command that reminds project administrators
    about subscription requests that are still pending.

    It gathers the usernames with a pending PrjRequest per project, then
    notifies every administrator (PrjRole holder) of those projects.
    NOTE: uses Python 2 idioms (dict.has_key).
    """

    def handle(self, *args, **options):
        super(Command, self).handle(options)
        # (username, userid) of a project admin -> list of project names
        admin_table = dict()
        # username -> e-mail address (looked up at most once per user)
        mail_table = dict()
        # project name -> list of usernames with a pending request
        req_table = dict()
        try:
            # Collect pending requests and admin roles in one consistent
            # DB snapshot.
            with transaction.atomic():
                for prj_req in PrjRequest.objects.filter(flowstatus=PSTATUS_PENDING):
                    prjname = prj_req.project.projectname
                    if not req_table.has_key(prjname):
                        req_table[prjname] = list()
                    req_table[prjname].append(prj_req.registration.username)
                for prjname in req_table.keys():
                    # Every PrjRole holder of a project with pending
                    # requests is an admin to be notified.
                    for p_role in PrjRole.objects.filter(project__projectname=prjname):
                        user_name = p_role.registration.username
                        user_id = p_role.registration.userid
                        user_tuple = (user_name, user_id)
                        if not admin_table.has_key(user_tuple):
                            admin_table[user_tuple] = list()
                        admin_table[user_tuple].append(p_role.project.projectname)
                        if not mail_table.has_key(user_name):
                            tmpres = EMail.objects.filter(registration__username=user_name)
                            if len(tmpres):
                                mail_table[user_name] = tmpres[0].email
            # Send one reminder per (admin, project) pair, outside the
            # transaction.  A missing e-mail raises KeyError below, which
            # is caught and logged per notification.
            for user_tuple in admin_table:
                for prj_name in admin_table[user_tuple]:
                    try:
                        noti_params = {
                            'pendingreqs' : req_table[prj_name],
                            'project' : prj_name
                        }
                        notifyUser(mail_table[user_tuple[0]], SUBSCR_REMINDER, noti_params,
                                   dst_user_id=user_tuple[1])
                    except:
                        LOG.error("Cannot notify pending subscription: %s" % user_tuple[0],
                                  exc_info=True)
        except:
            # (sic: "subscritions" typo is part of the runtime messages)
            LOG.error("Cannot notify pending subscritions: system error", exc_info=True)
            raise CommandError("Cannot notify pending subscritions")
| [
"paolo.andreetto@pd.infn.it"
] | paolo.andreetto@pd.infn.it |
8570f0b581dbd4ad8bbf7a06e2040630abddadc0 | 9784a90cac667e8e0aaba0ca599b4255b215ec67 | /chainer_/datasets/svhn_cls_dataset.py | 41da30bcecc81e26b5d6dc68f2b4f4fc67cabb8f | [
"MIT"
] | permissive | osmr/imgclsmob | d2f48f01ca541b20119871393eca383001a96019 | f2993d3ce73a2f7ddba05da3891defb08547d504 | refs/heads/master | 2022-07-09T14:24:37.591824 | 2021-12-14T10:15:31 | 2021-12-14T10:15:31 | 140,285,687 | 3,017 | 624 | MIT | 2022-07-04T15:18:37 | 2018-07-09T12:57:46 | Python | UTF-8 | Python | false | false | 1,587 | py | """
SVHN classification dataset.
"""
import os
from chainer.dataset import DatasetMixin
from chainer.datasets.svhn import get_svhn
from .cifar10_cls_dataset import CIFAR10MetaInfo
class SVHN(DatasetMixin):
    """
    SVHN image classification dataset from http://ufldl.stanford.edu/housenumbers/.
    Each sample is an image (in 3D NDArray) with shape (32, 32, 3).
    Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
    we assign the label `0` to the digit `0`.

    Parameters:
    ----------
    root : str, default '~/.chainer/datasets/svhn'
        Path to temp folder for storing data.
        NOTE(review): only checked for not-None here; chainer's get_svhn()
        manages its own download location -- confirm intent.
    mode : str, default 'train'
        'train', 'val', or 'test'.
        NOTE(review): any value other than 'train' selects the test split;
        there is no distinct validation split in this implementation.
    transform : function, default None
        A function that takes data and label and transforms them.
        NOTE(review): get_example() always calls it, so a callable must be
        supplied; leaving the None default would raise at access time.
    """
    def __init__(self,
                 root=os.path.join("~", ".chainer", "datasets", "svhn"),
                 mode="train",
                 transform=None):
        assert (root is not None)
        self.transform = transform
        # get_svhn() downloads/caches SVHN and returns (train, test) datasets.
        train_ds, test_ds = get_svhn()
        self.base = train_ds if mode == "train" else test_ds

    def __len__(self):
        # Number of samples in the selected split.
        return len(self.base)

    def get_example(self, i):
        # Fetch the i-th (image, label) pair and apply the user transform.
        image, label = self.base[i]
        image = self.transform(image)
        return image, label
class SVHNMetaInfo(CIFAR10MetaInfo):
    """Dataset metadata for SVHN.

    Inherits the CIFAR-10 defaults (32x32 inputs, 10 classes) and
    overrides only the SVHN-specific fields.
    """
    def __init__(self):
        super(SVHNMetaInfo, self).__init__()
        self.label = "SVHN"  # human-readable dataset name
        self.root_dir_name = "svhn"  # sub-directory under the data root
        self.dataset_class = SVHN  # dataset implementation to instantiate
        self.num_training_samples = 73257  # official SVHN train split size
| [
"osemery@gmail.com"
] | osemery@gmail.com |
c738e7dd0a22c2c6d399f18ba00ba42343053ea5 | 90cad1df7b7d424feb8e71ff3d77e772d446afdf | /reebill/payment_dao.py | 7a26623a1f93df8ab5c4f36c04a68d01f167cc14 | [] | no_license | razagilani/billing | acb8044c22b4075250c583f599baafe3e09abc2e | fd2b20019eeedf0fcc781e5d81ff240be90c0b37 | refs/heads/master | 2021-05-01T14:46:32.138870 | 2016-03-09T18:55:09 | 2016-03-09T18:55:09 | 79,589,205 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,111 | py | from datetime import datetime
from sqlalchemy import and_
from core.model import Session, UtilityAccount
from reebill.exceptions import IssuedBillError
from reebill.reebill_model import ReeBillCustomer, Payment
class PaymentDAO(object):
    '''CRUD for Payment objects. Some of these methods are used only in tests
    and should be removed.

    All methods operate on the shared SQLAlchemy Session; callers are
    responsible for committing.
    '''
    def create_payment(self, account, date_applied, description,
                       credit, date_received=None):
        '''Adds a new payment, returns the new Payment object. By default,
        'date_received' is the current datetime in UTC when this method is
        called; only override this for testing purposes.'''
        # NOTE a default value for 'date_received' can't be specified as a
        # default argument in the method signature because it would only get
        # evaluated once at the time this module was imported, which means its
        # value would be the same every time this method is called.
        if date_received is None:
            date_received = datetime.utcnow()
        session = Session()
        # Resolve account number -> UtilityAccount -> ReeBillCustomer.
        utility_account = session.query(UtilityAccount) \
            .filter(UtilityAccount.account==account).one()
        reebill_customer = session.query(ReeBillCustomer) \
            .filter(ReeBillCustomer.utility_account==utility_account) \
            .one()
        new_payment = Payment(reebill_customer, date_received, date_applied,
                              description, credit)
        session.add(new_payment)
        # flush() assigns the new row's id without committing.
        session.flush()
        return new_payment

    def delete_payment(self, id):
        '''Deletes the payment with the given id.'''
        session = Session()
        payment = session.query(Payment).filter(Payment.id == id).one()
        # Payments already applied to an issued reebill are immutable.
        if payment.reebill_id is not None:
            raise IssuedBillError('payments cannot be deleted after they are'
                                  'applied to an issued reebill')
        session.delete(payment)

    def find_payment(self, account, periodbegin, periodend):
        '''Returns a list of payment objects whose date_applied is in
        [periodbegin, period_end).'''
        # periodbegin and periodend must be non-overlapping between bills. This
        # is in direct opposition to the reebill period concept, which is a
        # period that covers all services for a given reebill and thus overlap
        # between bills. Therefore, a non overlapping period could be just the
        # first utility service on the reebill. If the periods overlap,
        # payments will be applied more than once. See 11093293
        session = Session()
        utility_account = session.query(UtilityAccount) \
            .filter(UtilityAccount.account==account).one()
        reebill_customer = session.query(ReeBillCustomer) \
            .filter(ReeBillCustomer.utility_account==utility_account) \
            .one()
        payments = session.query(Payment) \
            .filter(Payment.reebill_customer == reebill_customer) \
            .filter(and_(Payment.date_applied >= periodbegin,
                         Payment.date_applied < periodend)).all()
        return payments

    def get_total_payment_since(self, account, start, end=None):
        '''Returns sum of all account's payments applied on or after 'start'
        and before 'end' (today by default). If 'start' is None, the beginning
        of the interval extends to the beginning of time.

        NOTE(review): the assert below requires 'start' to be a datetime, so
        the documented "start is None" case (and the later None check) is
        unreachable -- confirm which behavior is intended.  Also, despite the
        name, this returns the list of Payment rows, not their sum.
        '''
        assert isinstance(start, datetime)
        if end is None:
            end=datetime.utcnow()
        session = Session()
        reebill_customer = session.query(ReeBillCustomer).join(
            UtilityAccount).filter_by(account=account).one()
        payments = session.query(Payment) \
            .filter(Payment.reebill_customer==reebill_customer) \
            .filter(Payment.date_applied < end)
        if start is not None:
            payments = payments.filter(Payment.date_applied >= start)
        return payments.all()

    def get_payments(self, account):
        '''Returns list of all payments for the given account ordered by
        date_received.'''
        session = Session()
        payments = session.query(Payment).join(ReeBillCustomer) \
            .join(UtilityAccount) \
            .filter(UtilityAccount.account == account).order_by(
            Payment.date_received).all()
        return payments

    def get_payments_for_reebill_id(self, reebill_id):
        # All payments applied to the given reebill, ordered by receipt date.
        session = Session()
        payments = session.query(Payment) \
            .filter(Payment.reebill_id == reebill_id).order_by(
            Payment.date_received).all()
        return payments

    def update_payment(self, id, date_applied, description, credit):
        # Update the mutable fields of a payment; the session is flushed /
        # committed by the caller.
        session = Session()
        payment = session.query(Payment).filter_by(id=id).one()
        if payment.reebill_id is not None:
            raise IssuedBillError('payments cannot be changed after they are'
                                  'applied to an issued reebill')
        payment.date_applied = date_applied
        payment.description = description
        payment.credit = credit
| [
"dklothe@skylineinnovations.com"
] | dklothe@skylineinnovations.com |
19e03a975d6e84656aa59f9380034203aaaba1c3 | d3fa8ded9d393ba9b03388ba7f05fc559cf31d1e | /Codes/antman/agent/framework/controllers/file.py | e53710b676cb8afb754414194c3a23579657a3a0 | [] | no_license | lengxu/YouYun | e20c4d8f553ccb245e96de177a67f776666e986f | b0ad8fd0b0e70dd2445cecb9ae7b00f7e0a20815 | refs/heads/master | 2020-09-13T22:30:49.642980 | 2017-11-27T03:13:34 | 2017-11-27T03:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,515 | py | # coding: utf-8
import nfs
import os
import logging
import time
from tornado import web, gen
from tornado.locks import Semaphore
from tornado.httpclient import AsyncHTTPClient
from framework import settings
from framework.config import config
# Upper bound for a single proxied file body (4 GB).
MAX_BODY_SIZE = 4 * 1024.0 * 1024.0 * 1024.0  # 4GB
# Format of HTTP date headers such as Last-Modified (RFC 1123 style).
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
# Raise tornado's default body-size limit for the shared AsyncHTTPClient.
AsyncHTTPClient.configure(None, max_body_size=MAX_BODY_SIZE)
logger = logging.getLogger('default')
# Caps the number of concurrent upstream downloads (default 5).
semaphore = Semaphore(config.get('file_service_semaphore', 5))
class FileHandler(web.RequestHandler):
    """Caching file proxy.

    GET ?filename=... returns the local path of a cached copy of the file,
    downloading it from the configured upstream when the cache is missing
    or stale.  A ".<name>.lock" file marks an in-progress download so that
    concurrent requests for the same file wait instead of re-downloading.
    """

    @gen.coroutine
    def get(self):
        self.file_name = self.get_argument('filename')  # type: str
        self.space_dir = nfs.join(settings.REPO_DIR,
                                  settings.REPO_ANT_SPACENAME)
        if not nfs.exists(self.space_dir):
            nfs.makedirs(self.space_dir)
        self.file_path = nfs.join(self.space_dir, self.file_name)
        # Lock file name: ".<file_name>.lock" in the same directory.
        lock_file_name = nfs.extsep + self.file_name + nfs.extsep + 'lock'
        self.lock_file = nfs.join(self.space_dir, lock_file_name)
        logger.info('#%d Request file: %s', id(self.request), self.file_name)
        if nfs.exists(self.lock_file):
            # Another request is already downloading this file: wait for it.
            yield self.wait_for_file_complete()
        else:
            is_cache_hit = yield self.try_to_return_file_cache()
            if is_cache_hit:
                return
            logger.info('#%d File cache missed: %s',
                        id(self.request), self.file_path)
            # Claim the download by creating the lock file, then fetch.
            nfs.touch(self.lock_file)
            yield self.request_file_from_upstream()

    @gen.coroutine
    def try_to_return_file_cache(self):
        # Return True (and answer the request) when a fresh cached copy
        # exists; a stale copy is deleted so it gets re-downloaded.
        is_cache_hit = False
        if nfs.exists(self.file_path):
            flag = yield self.check_file_mtime()
            if flag:
                logger.info('#%d File cache hit: %s',
                            id(self.request), self.file_path)
                self.write(self.file_path)  # respond with the local cache path
                is_cache_hit = True
            else:
                logger.info('#{} The cache file is too old and need to '
                            'download the new file'.format(id(self.request)))
                nfs.remove(self.file_path)
        raise gen.Return(is_cache_hit)

    @gen.coroutine
    def check_file_mtime(self):
        # Compare the upstream Last-Modified time (via a HEAD request) with
        # the cached file's mtime; True only when they match exactly.
        is_match = False
        try:
            http_client = AsyncHTTPClient()
            sep = '' if config['upstream'].endswith('/') else '/'
            # NOTE(review): the template contains no {filename} placeholder,
            # so the filename kwarg below is unused -- the "(unknown)" text
            # looks like redaction damage; confirm against the original.
            url = '{upstream}{sep}file/(unknown)'.format(
                upstream=config['upstream'], sep=sep, filename=self.file_name)
            response = yield http_client.fetch(
                url, method="HEAD", validate_cert=False)
            m_time = response.headers.get('Last-Modified', None)
            if m_time:
                m_time = time.mktime(time.strptime(m_time, GMT_FORMAT))
            file_m_time = os.stat(self.file_path).st_mtime
            if m_time and file_m_time and m_time == file_m_time:
                is_match = True
            else:
                logger.error('#{} The m_time from server is {}, the m_time '
                             'from cache is {} !'.format(
                                 id(self.request), m_time, file_m_time))
        except Exception as e:
            # Any failure (network, parse) is treated as "not matching".
            logger.error('#{} Get Last-Modified from server error: {}'
                         .format(id(self.request), e))
        raise gen.Return(is_match)

    @gen.coroutine
    def wait_for_file_complete(self):
        # Poll until the lock disappears and the file exists, or time out.
        logger.info('#%d File lock exists, waiting for complete: %s',
                    id(self.request), self.file_path)
        lock_watch_interval = config.get('file_service_lock_watch_interval',
                                         5.0)
        current_timeout = 0.0
        request_timeout = config.get('file_service_request_timeout', 3600.0)
        while current_timeout < request_timeout:
            yield gen.sleep(lock_watch_interval)
            current_timeout += lock_watch_interval
            if not nfs.exists(self.lock_file) and nfs.exists(self.file_path):
                self.write(self.file_path)  # cache complete: return local path
                return
            else:
                logger.info('#%d Waiting for file complete: %s',
                            id(self.request), self.file_path)
        # Timed out waiting for the other download to finish.
        self.send_error(504, message='Waiting for file complete timeout')

    def on_file_chunk(self, chunk):
        # Streaming callback: append each received chunk to the temp file.
        if self.temp_file and not self.temp_file.closed:
            self.temp_file.write(chunk)

    @gen.coroutine
    def request_file_from_upstream(self):
        # No local cache and no lock file: download from the upstream server.
        try:
            # Critical section: bounds concurrent downloads so the shared
            # AsyncHTTPClient is not exhausted.
            yield semaphore.acquire()
            self.temp_file = open(self.file_path, 'wb')
            http_client = AsyncHTTPClient()
            sep = '' if config['upstream'].endswith('/') else '/'
            # NOTE(review): same unused-{filename} template as above.
            url = '{upstream}{sep}file/(unknown)'.format(
                upstream=config['upstream'], sep=sep, filename=self.file_name)
            response = yield http_client.fetch(
                url,
                validate_cert=False,
                streaming_callback=self.on_file_chunk,
                connect_timeout=config.get('file_service_connect_timeout',
                                           3600.0),
                request_timeout=config.get('file_service_request_timeout',
                                           3600.0))
            self.generate_response(response)
        except Exception as exc:
            logger.error(
                '#%d Error while fetching %s: %s',
                id(self.request),
                self.file_name,
                exc,
                exc_info=True)
            self.send_error(500, message=exc)
        finally:
            # Always release the semaphore and clean up the lock/temp file.
            yield semaphore.release()
            self.close_file_resource()

    def generate_response(self, response):
        if response.code == 200:
            logger.info('#%d Complete, change file last-modified',
                        id(self.request))
            if self.temp_file and not self.temp_file.closed:
                self.temp_file.close()
            m_time = response.headers.get('Last-Modified', None)
            m_time = time.mktime(time.strptime(m_time, GMT_FORMAT)) \
                if m_time else time.time()
            # Set the cached file's mtime to the server's value so future
            # requests can detect whether the upstream file changed.
            os.utime(self.file_path, (int(time.time()), int(m_time)))
            self.write(self.file_path)
        else:
            logger.error('#%d Non-200 file response from upstream: %d',
                         id(self.request), response.code)
            self.send_error(
                500,
                message='Non-200 file response from upstream:{}'
                .format(response.code))

    def close_file_resource(self):
        # Best-effort cleanup of the temp file handle and the lock file.
        try:
            if self.temp_file and not self.temp_file.closed:
                self.temp_file.close()
            if nfs.exists(self.lock_file):
                nfs.remove(self.lock_file)
        except Exception as exc:
            logger.error(
                '#%d Error while closing resource (%s): %s',
                id(self.request),
                self.file_path,
                exc,
                exc_info=True)
            self.send_error(500, message=exc)  # FIXME: may be invoked after the request has already finished
| [
"smartbrandnew@163.com"
] | smartbrandnew@163.com |
b7e9a84dfee84fe3a63a89a7b9e557c566d47949 | 0805420ce1890c36aa9e0cc1a782945464433ef6 | /client/eve/client/script/ui/structure/structureSettings/schedule/__init__.py | 2527ee24c593c6d6a1cfdf69e8d5bb20d7008db8 | [] | no_license | cnrat/dec-eve-serenity | 4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c | 37519e66a5fbb0d7c417d5cf9778636991efbed8 | refs/heads/master | 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\structure\structureSettings\schedule\__init__.py
__author__ = 'bara' | [
"victorique.de.blois@asu.edu"
] | victorique.de.blois@asu.edu |
a88b50f072fc292e98b1924ee430d4fb78ab9eb7 | dc50eb6176b4f5609519e912bc5379cea3fac9d2 | /Learn/spider/21DaysOfDistributedSpider/ch06/jianshu_spider/jianshu_spider/start.py | 93c5f64fabc256f15c10cbb8676d97c8fddd221c | [] | no_license | shuxiangguo/Python | 890c09a028e660206a8b3a8c7ca094a6f642095d | 089b2795e1db113dea6333d8dee6803071921cab | refs/heads/master | 2020-04-06T10:28:17.851981 | 2018-12-20T04:39:03 | 2018-12-20T04:39:03 | 157,381,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # encoding: utf-8
"""
@author: shuxiangguo
@file: start.py
@time: 2018-11-29 17:26:57
"""
from scrapy import cmdline
cmdline.execute("scrapy crawl js".split()) | [
"shuxiangguo7@gmail.com"
] | shuxiangguo7@gmail.com |
d82c316008ae3777ac502b60b15b2a6e27e8e845 | 7986ec6498e3f93967fa9bfe2b6a9d4056138293 | /Protheus_WebApp/Modules/SIGAPCP/MATA660TESTCASE.py | 16968c8e2418ee95143f196717d2106ec59cdfd2 | [
"MIT"
] | permissive | HelenaAdrignoli/tir-script-samples | 7d08973e30385551ef13df15e4410ac484554303 | bb4f4ab3a49f723216c93f66a4395e5aa328b846 | refs/heads/master | 2023-02-21T11:26:28.247316 | 2020-04-28T16:37:26 | 2020-04-28T16:37:26 | 257,304,757 | 0 | 0 | MIT | 2020-04-20T14:22:21 | 2020-04-20T14:22:20 | null | UTF-8 | Python | false | false | 1,304 | py | from tir import Webapp
import unittest
class MATA660(unittest.TestCase):
    """TIR UI test for Protheus routine MATA660 (resource downtime):
    creates a downtime record and re-opens it to verify every field."""

    @classmethod
    def setUpClass(inst):
        # NOTE: TIR samples name the classmethod argument 'inst' instead of
        # the conventional 'cls'.  Log in to module SIGAPCP on branch
        # 'D MG 01' and open routine MATA660.
        inst.oHelper = Webapp()
        inst.oHelper.Setup('SIGAPCP','26/04/2019','T1','D MG 01 ','10')
        inst.oHelper.Program('MATA660')

    def test_MATA660_001(self):
        # Create a downtime entry for resource MT6601.
        self.oHelper.SetButton('Outras Ações', 'Incluir')
        self.oHelper.SetBranch('D MG 01')
        self.oHelper.SetValue('H9_RECURSO','MT6601')
        self.oHelper.SetValue('H9_MOTIVO','QUEBRA DE EQUIPAMENTO')
        self.oHelper.SetValue('H9_DTINI','25/04/2019')
        self.oHelper.SetValue('H9_DTFIM','27/04/2019')
        self.oHelper.SetValue('H9_HRINI','10:00')
        self.oHelper.SetValue('H9_HRFIM','15:00')
        self.oHelper.SetButton('Salvar')
        self.oHelper.SetButton('Cancelar')
        # Re-open the record and check that all saved fields round-trip
        # (H9_CCUSTO is expected to be filled in automatically).
        self.oHelper.SetButton('Visualizar')
        self.oHelper.CheckResult('H9_RECURSO','MT6601')
        self.oHelper.CheckResult('H9_CCUSTO','PCP000001')
        self.oHelper.CheckResult('H9_MOTIVO','QUEBRA DE EQUIPAMENTO')
        self.oHelper.CheckResult('H9_DTINI','25/04/2019')
        self.oHelper.CheckResult('H9_DTFIM','27/04/2019')
        self.oHelper.CheckResult('H9_HRINI','10:00')
        self.oHelper.CheckResult('H9_HRFIM','15:00')
        self.oHelper.SetButton('Cancelar')
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        # Close the Protheus session.
        inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main() | [
"hadrignoli@gmail.com"
] | hadrignoli@gmail.com |
b260ab5265f53119dfc20bd10fab69410b969e8d | 39a5908ff24b9a4d9b5e9a90f76ba248ec47fd39 | /mymultispider/mymultispider/spiders/myspd2.py | c1944c6bf3acf97d9f71130cedffe87a9a2df2b9 | [] | no_license | terroristhouse/crawler | 281b10ccc2490b4f1a86eae7ae819cf408f15bd8 | 3c501da46deef73b80e381d6d3c45cc049702d14 | refs/heads/master | 2020-12-27T03:28:20.586755 | 2020-09-08T03:43:56 | 2020-09-08T03:43:56 | 284,569,509 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
import scrapy
from mymultispider.items import Myspd2spiderItem
class Myspd2Spider(scrapy.Spider):
name = 'myspd2'
allowed_domains = ['sina.com.cn']
start_urls = ['http://sina.com.cn/']
# custom_settings = {
# 'ITEM_PIPELINES': {'mymultispider.pipelines.Myspd2spiderPipeline': 300},
# }
def parse(self, response):
print('myspd2')
item = Myspd2spiderItem()
item['name'] = 'myspd2的pipelines'
yield item | [
"867940410@qq.com"
] | 867940410@qq.com |
17f36f2e6c5b6dc04263fd98c3913c332d50c9a7 | 7cd30248342dc83e0b49409bed4b3df378b629b1 | /sampling_image_15channels.py | d5d3bc787979ea380c94ef835d399663a0d72d43 | [] | no_license | minhnd3796/NGUYENDUCMINH_CODE | d34fc5cb0c9ba4108faf500170a8bea5bdef1d04 | 9fb27777ca0d40018c7154f7be19b420cf391471 | refs/heads/master | 2021-04-06T20:47:34.958473 | 2018-06-12T08:36:33 | 2018-06-12T08:36:33 | 125,452,799 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,190 | py | import os
import numpy as np
import scipy.misc as misc
# Output directories for the sampled training / validation crops (.npy).
base_dir_train = "../ISPRS_semantic_labeling_Vaihingen/train_15channels"
base_dir_validate = "../ISPRS_semantic_labeling_Vaihingen/validate_15channels"
# Ground-truth label images.
base_dir_annotations = "../ISPRS_semantic_labeling_Vaihingen/annotations"
# Input rasters: true orthophoto (3 channels) plus auxiliary channels.
base_dir_top = "../ISPRS_semantic_labeling_Vaihingen/top"
base_dir_ndsm = "../ISPRS_semantic_labeling_Vaihingen/ndsm"
base_dir_dsm = "../ISPRS_semantic_labeling_Vaihingen/dsm"
base_dir_ndvi= "../ISPRS_semantic_labeling_Vaihingen/ndvi"
base_dir_L= "../ISPRS_semantic_labeling_Vaihingen/L"
base_dir_A= "../ISPRS_semantic_labeling_Vaihingen/A"
base_dir_B= "../ISPRS_semantic_labeling_Vaihingen/B"
base_dir_ele= "../ISPRS_semantic_labeling_Vaihingen/ele"
base_dir_azi= "../ISPRS_semantic_labeling_Vaihingen/azi"
base_dir_sat= "../ISPRS_semantic_labeling_Vaihingen/sat"
base_dir_entpy= "../ISPRS_semantic_labeling_Vaihingen/entpy"
base_dir_entpy2= "../ISPRS_semantic_labeling_Vaihingen/entpy2"
base_dir_texton= "../ISPRS_semantic_labeling_Vaihingen/texton"
# Cropped ground-truth patches for both splits go here.
base_dir_train_validate_gt = "../ISPRS_semantic_labeling_Vaihingen/train_validate_gt_15channels"
image_size = 224  # square crop side length
num_cropping_per_image = 3333  # random crops sampled per source tile
validate_image=['top_mosaic_09cm_area11.png']  # tiles held out for validation
def create_training_dataset():
    """Sample random 224x224 crops from every non-validation tile.

    For each annotation tile (except the held-out validation tiles) this
    loads the orthophoto plus 12 auxiliary rasters, then saves
    num_cropping_per_image random crops as float16 .npy arrays with the
    channels stacked along axis 2, together with the matching ground-truth
    patch as a .png.
    """
    for filename in os.listdir(base_dir_annotations):
        if filename in validate_image:
            continue
        # 3-channel orthophoto and its label image.
        top_image = misc.imread(os.path.join(base_dir_top,os.path.splitext(filename)[0]+".tif"))
        annotation_image = misc.imread(os.path.join(base_dir_annotations, filename))
        # Derive the per-channel file names from the annotation name, e.g.
        # top_mosaic_09cm_area1.png -> dsm_09cm_matching_area1.tif -> A1.tif.
        dsm_image_name= filename.replace('top_mosaic','dsm').replace('png','tif').replace('area','matching_area')
        dsm_image= misc.imread(base_dir_dsm+"/"+dsm_image_name)
        ndsm_image_name= dsm_image_name.replace('.tif','')+"_normalized.jpg"
        ndsm_image= misc.imread(base_dir_ndsm+"/"+ndsm_image_name)
        A_image_name = "A"+ndsm_image_name.replace('dsm_09cm_matching_area','').replace('_normalized.jpg','.tif')
        A_image = misc.imread(base_dir_A + "/"+ A_image_name)
        azi_image_name = A_image_name.replace('A','azi')
        azi_image = misc.imread(base_dir_azi+"/"+azi_image_name)
        B_image_name = A_image_name.replace('A', 'B')
        B_image = misc.imread(base_dir_B + "/" + B_image_name)
        ele_image_name = A_image_name.replace('A', 'ele')
        ele_image = misc.imread(base_dir_ele + "/" + ele_image_name)
        entpy_image_name = A_image_name.replace('A', 'entpy')
        entpy_image = misc.imread(base_dir_entpy + "/" + entpy_image_name)
        entpy2_image_name = A_image_name.replace('A', 'entpy2')
        entpy2_image = misc.imread(base_dir_entpy2 + "/" + entpy2_image_name)
        L_image_name = A_image_name.replace('A', 'L')
        L_image = misc.imread(base_dir_L + "/" + L_image_name)
        ndvi_image_name = A_image_name.replace('A', 'ndvi')
        ndvi_image = misc.imread(base_dir_ndvi + "/" + ndvi_image_name)
        sat_image_name = A_image_name.replace('A', 'sat')
        sat_image = misc.imread(base_dir_sat + "/" + sat_image_name)
        texton_image_name = A_image_name.replace('A', 'texton')
        texton_image = misc.imread(base_dir_texton + "/" + texton_image_name)
        width= np.shape(top_image)[1]
        height= np.shape(top_image)[0]
        for i in range(num_cropping_per_image):
            # Uniform random top-left corner of an image_size crop.
            x = int(np.random.uniform(0, height - image_size + 1))
            y = int(np.random.uniform(0, width - image_size + 1))
            print((x,y))
            top_image_cropped= top_image[x:x + image_size, y:y + image_size, :]
            # Each single-channel raster is cropped then given a trailing
            # channel axis so everything can be concatenated on axis 2.
            ndsm_image_cropped= ndsm_image[x:x + image_size, y:y + image_size]
            ndsm_image_cropped= np.expand_dims(ndsm_image_cropped,axis=2)
            dsm_image_cropped= dsm_image[x:x + image_size, y:y + image_size]
            dsm_image_cropped= np.expand_dims(dsm_image_cropped,axis=2)
            A_image_cropped = A_image[x:x + image_size, y:y + image_size]
            A_image_cropped = np.expand_dims(A_image_cropped, axis=2)
            azi_image_cropped = azi_image[x:x + image_size, y:y + image_size]
            azi_image_cropped = np.expand_dims(azi_image_cropped, axis=2)
            B_image_cropped = B_image[x:x + image_size, y:y + image_size]
            B_image_cropped = np.expand_dims(B_image_cropped, axis=2)
            ele_image_cropped = ele_image[x:x + image_size, y:y + image_size]
            ele_image_cropped = np.expand_dims(ele_image_cropped, axis=2)
            entpy_image_cropped = entpy_image[x:x + image_size, y:y + image_size]
            entpy_image_cropped = np.expand_dims(entpy_image_cropped, axis=2)
            entpy2_image_cropped = entpy2_image[x:x + image_size, y:y + image_size]
            entpy2_image_cropped = np.expand_dims(entpy2_image_cropped, axis=2)
            L_image_cropped = L_image[x:x + image_size, y:y + image_size]
            L_image_cropped = np.expand_dims(L_image_cropped, axis=2)
            ndvi_image_cropped = ndvi_image[x:x + image_size, y:y + image_size]
            ndvi_image_cropped = np.expand_dims(ndvi_image_cropped, axis=2)
            sat_image_cropped = sat_image[x:x + image_size, y:y + image_size]
            sat_image_cropped = np.expand_dims(sat_image_cropped, axis=2)
            texton_image_cropped = texton_image[x:x + image_size, y:y + image_size]
            texton_image_cropped = np.expand_dims(texton_image_cropped, axis=2)
            # Stack 3 orthophoto channels + 12 auxiliary channels = 15.
            array_to_save= np.concatenate((top_image_cropped,ndsm_image_cropped,dsm_image_cropped, A_image_cropped,
                azi_image_cropped, B_image_cropped, ele_image_cropped, entpy_image_cropped, entpy2_image_cropped,
                L_image_cropped, ndvi_image_cropped, sat_image_cropped, texton_image_cropped),axis=2).astype(dtype=np.float16)
            np.save(os.path.join(base_dir_train, os.path.splitext(filename)[0] + "_" + str(i)+".npy"),array_to_save)
            annotation_image_cropped= annotation_image[x:x + image_size, y:y + image_size]
            misc.imsave(os.path.join(base_dir_train_validate_gt, os.path.splitext(filename)[0] + "_" + str(i) + ".png"), annotation_image_cropped)
    return None
def create_validation_dataset():
for filename in validate_image:
top_image = misc.imread(os.path.join(base_dir_top, os.path.splitext(filename)[0] + ".tif"))
annotation_image = misc.imread(os.path.join(base_dir_annotations, filename))
dsm_image_name = filename.replace('top_mosaic', 'dsm').replace('png', 'tif').replace('area','matching_area')
dsm_image = misc.imread(base_dir_dsm + "/" + dsm_image_name)
ndsm_image_name = dsm_image_name.replace('.tif', '') + "_normalized.jpg"
ndsm_image = misc.imread(base_dir_ndsm + "/" + ndsm_image_name)
width = np.shape(top_image)[1]
height = np.shape(top_image)[0]
for i in range(num_cropping_per_image):
x = int(np.random.uniform(0, height - image_size + 1))
y = int(np.random.uniform(0, width - image_size + 1))
print((x, y))
top_image_cropped = top_image[x:x + image_size, y:y + image_size, :]
ndsm_image_cropped = ndsm_image[x:x + image_size, y:y + image_size]
ndsm_image_cropped = np.expand_dims(ndsm_image_cropped, axis=2)
dsm_image_cropped = dsm_image[x:x + image_size, y:y + image_size]
dsm_image_cropped = np.expand_dims(dsm_image_cropped, axis=2)
array_to_save = np.concatenate((top_image_cropped, ndsm_image_cropped, dsm_image_cropped), axis=2).astype(dtype=np.float16)
np.save(os.path.join(base_dir_validate, os.path.splitext(filename)[0] + "_" + str(i) + ".npy"), array_to_save)
# misc.imsave(os.path.join(base_dir_train, os.path.splitext(filename)[0] + "_" + str(i) + ".tif"), top_image_cropped)
annotation_image_cropped = annotation_image[x:x + image_size, y:y + image_size]
misc.imsave(os.path.join(base_dir_train_validate_gt, os.path.splitext(filename)[0] + "_" + str(i) + ".png"),
annotation_image_cropped)
return None
if __name__=="__main__":
create_training_dataset()
| [
"gordonnguyen3796@gmail.com"
] | gordonnguyen3796@gmail.com |
1ad8e42d8d9f216254a022a78cade94d1ffdf242 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20191209/example_metashape/conf2.py | 24069b8dfc3798d4637b5f160b6a247571a22e1b | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 1,089 | py | from __future__ import annotations
import typing as t
import dataclasses
@dataclasses.dataclass
class Toplevel:
site_name: str = "Material for MkDocs"
class Theme:
name: str = "material"
language: str = "en"
class Palette:
primary: str = "indigo"
accent: str = "indigo"
palette: t.Type["Toplevel.Theme.Palette"] = Palette
class Font:
text: str = "Roboto"
code: str = "Roboto Mono"
font: t.Type["Toplevel.Theme.Font"] = Font
theme: t.Type["Toplevel.Theme"] = Theme
# hmm
#
# diff --git a/metashape/analyze/typeinfo.py b/metashape/analyze/typeinfo.py
# index ede8971..9074fc5 100644
# --- a/metashape/analyze/typeinfo.py
# +++ b/metashape/analyze/typeinfo.py
# @@ -196,6 +196,8 @@ def typeinfo(
# is_optional=is_optional,
# )
# else:
# + inner = typing_inspect.get_args(typ)[0]
# + return typeinfo(inner)
# raise ValueError(f"unsuported type %{typ}")
# supertypes = []
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
c3c267c38c57dffabbb56aeea85b6750efebb684 | 57d964ebf76d2462f21017ec68e124b6728d8ccb | /setup.py | e663edcf79392ac1efe703d50ba0012944b55225 | [
"MIT"
] | permissive | chaeminlim/netron | a2983ada9d803549a3266e1a5922894b39226b26 | d75991128647a636425c746205b0a28a21d40e07 | refs/heads/master | 2021-01-15T03:54:05.742616 | 2020-03-05T10:45:53 | 2020-03-05T10:45:53 | 242,869,800 | 0 | 0 | MIT | 2020-02-24T23:56:50 | 2020-02-24T23:56:49 | null | UTF-8 | Python | false | false | 6,854 | py | #!/usr/bin/env python
import distutils
import io
import json
import os
import setuptools
import setuptools.command.build_py
import distutils.command.build
node_dependencies = [
( 'netron', [
'node_modules/d3/dist/d3.min.js',
'node_modules/dagre/dist/dagre.min.js',
'node_modules/handlebars/dist/handlebars.min.js',
'node_modules/marked/marked.min.js',
'node_modules/pako/dist/pako.min.js',
'node_modules/long/dist/long.js',
'node_modules/protobufjs/dist/protobuf.min.js',
'node_modules/protobufjs/ext/prototxt/prototxt.js',
'node_modules/flatbuffers/js/flatbuffers.js' ] )
]
class build(distutils.command.build.build):
user_options = distutils.command.build.build.user_options + [ ('version', None, 'version' ) ]
def initialize_options(self):
distutils.command.build.build.initialize_options(self)
self.version = None
def finalize_options(self):
distutils.command.build.build.finalize_options(self)
def run(self):
build_py.version = bool(self.version)
return distutils.command.build.build.run(self)
class build_py(setuptools.command.build_py.build_py):
user_options = setuptools.command.build_py.build_py.user_options + [ ('version', None, 'version' ) ]
def initialize_options(self):
setuptools.command.build_py.build_py.initialize_options(self)
self.version = None
def finalize_options(self):
setuptools.command.build_py.build_py.finalize_options(self)
def run(self):
setuptools.command.build_py.build_py.run(self)
for target, files in node_dependencies:
target = os.path.join(self.build_lib, target)
if not os.path.exists(target):
os.makedirs(target)
for file in files:
self.copy_file(file, target)
def build_module(self, module, module_file, package):
setuptools.command.build_py.build_py.build_module(self, module, module_file, package)
if build_py.version and module == '__version__':
package = package.split('.')
outfile = self.get_module_outfile(self.build_lib, package, module)
with open(outfile, 'w+') as f:
f.write("__version__ = '" + package_version() + "'\n")
def package_version():
folder = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(folder, 'package.json')) as package_file:
package_manifest = json.load(package_file)
return package_manifest['version']
setuptools.setup(
name="netron",
version=package_version(),
description="Viewer for neural network, deep learning and machine learning models",
long_description='Netron is a viewer for neural network, deep learning and machine learning models.\n\n' +
'Netron supports **ONNX** (`.onnx`, `.pb`), **Keras** (`.h5`, `.keras`), **Core ML** (`.mlmodel`), **Caffe** (`.caffemodel`, `.prototxt`), **Caffe2** (`predict_net.pb`), **Darknet** (`.cfg`), **MXNet** (`.model`, `-symbol.json`), ncnn (`.param`) and **TensorFlow Lite** (`.tflite`). Netron has experimental support for **TorchScript** (`.pt`, `.pth`), **PyTorch** (`.pt`, `.pth`), **Torch** (`.t7`), **ArmNN** (`.armnn`), **BigDL** (`.bigdl`, `.model`), **Chainer** (`.npz`, `.h5`), **CNTK** (`.model`, `.cntk`), **Deeplearning4j** (`.zip`), **PaddlePaddle** (`__model__`), **MediaPipe** (`.pbtxt`), **ML.NET** (`.zip`), MNN (`.mnn`), **OpenVINO** (`.xml`), **scikit-learn** (`.pkl`), **TensorFlow.js** (`model.json`, `.pb`) and **TensorFlow** (`.pb`, `.meta`, `.pbtxt`, `.ckpt`, `.index`).',
keywords=[
'onnx', 'keras', 'tensorflow', 'tflite', 'coreml', 'mxnet', 'caffe', 'caffe2', 'torchscript', 'pytorch', 'ncnn', 'mnn' 'openvino', 'darknet', 'paddlepaddle', 'chainer',
'artificial intelligence', 'machine learning', 'deep learning', 'neural network',
'visualizer', 'viewer'
],
license="MIT",
cmdclass={
'build': build,
'build_py': build_py
},
package_dir={
'netron': 'src'
},
packages=[
'netron'
],
package_data={
'netron': [
'favicon.ico', 'icon.png',
'base.js',
'numpy.js', 'pickle.js', 'hdf5.js', 'bson.js',
'zip.js', 'tar.js', 'gzip.js',
'armnn.js', 'armnn-metadata.json', 'armnn-schema.js',
'bigdl.js', 'bigdl-metadata.json', 'bigdl-proto.js',
'caffe.js', 'caffe-metadata.json', 'caffe-proto.js',
'caffe2.js', 'caffe2-metadata.json', 'caffe2-proto.js',
'chainer.js',
'cntk.js', 'cntk-metadata.json', 'cntk-proto.js',
'coreml.js', 'coreml-metadata.json', 'coreml-proto.js',
'darknet.js', 'darknet-metadata.json',
'dl4j.js', 'dl4j-metadata.json',
'flux.js', 'flux-metadata.json',
'keras.js', 'keras-metadata.json',
'mediapipe.js',
'mlnet.js', 'mlnet-metadata.json',
'mnn.js', 'mnn-metadata.json', 'mnn-schema.js',
'mxnet.js', 'mxnet-metadata.json',
'ncnn.js', 'ncnn-metadata.json',
'onnx.js', 'onnx-metadata.json', 'onnx-proto.js',
'openvino.js', 'openvino-metadata.json', 'openvino-parser.js',
'paddle.js', 'paddle-metadata.json', 'paddle-proto.js',
'pytorch.js', 'pytorch-metadata.json', 'python.js',
'sklearn.js', 'sklearn-metadata.json',
'tf.js', 'tf-metadata.json', 'tf-proto.js',
'tflite.js', 'tflite-metadata.json', 'tflite-schema.js',
'torch.js', 'torch-metadata.json',
'index.html', 'index.js',
'view-grapher.css', 'view-grapher.js',
'view-sidebar.css', 'view-sidebar.js',
'view.js',
'server.py'
]
},
install_requires=[],
author='Lutz Roeder',
author_email='lutzroeder@users.noreply.github.com',
url='https://github.com/lutzroeder/netron',
entry_points={
'console_scripts': [ 'netron = netron:main' ]
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization'
]
) | [
"lutzroeder@users.noreply.github.com"
] | lutzroeder@users.noreply.github.com |
ded0b78eb650a9a7b1f02e68ec43f07f81a9da48 | 2b020a49e5c2bff241fd1a99fc31531ea2b6f8c1 | /pyLeetCode/S11_1_Container_With_Most_Water.py | 6e8aa6b416c2cb0808fe6774a90a5b807468c13c | [] | no_license | yangze01/algorithm | 7855461430dc0a5abcc8f1a94fda9318a0653e3e | 44968c3fd2ce02bd9ab18d02b487401a0d72c1a8 | refs/heads/master | 2021-01-11T11:09:54.668345 | 2018-04-08T15:04:11 | 2018-04-08T15:04:11 | 78,757,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py |
height = [1,2,3,4,5,6,7]
class Solution(object):
def maxArea(self,height):
max_val = 0
for i in range(0,len(height)-1):
for j in range(i+1,len(height)):
print(i,j)
tmp = (j-i)*min(height[i],height[j])
if(tmp>max_val):
max_val = tmp
return max_val
if __name__ == "__main__":
solution = Solution()
print(solution.maxArea(height))
| [
"858848101@qq.com"
] | 858848101@qq.com |
5330811aaf49a7feff3c2159e41445feaa2201d3 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /scripts_from_Astrocyte/scripts/mridataSort.py | 2ec9b124d72314d9b7234cf11aa592e10cf844cc | [
"BSD-2-Clause"
] | permissive | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,926 | py | import os, shutil, csv
src = '/media/katie/storage/PanicPTSD/data/raw_data'
dst = '/media/katie/storage/PanicPTSD/data-neworg/Panic/'
lst = '/media/katie/storage/PanicPTSD/data/PanicSubjListPHI.csv'
lst = open(lst)
def copy_and_rename(sub, exam, time):
for folder in os.listdir(src):
if folder == exam:
for root, dirs, files in os.walk(src + "/" + folder):
print dirs
anatcount = 0
simoncount = 0
affectcount = 0
for d in dirs:
if 'spgr' in d or 'SPGR' in d:
anatcount += 1
print anatcount
print "anat ", d
newname = exam + "-Anat-" + str(anatcount)
srcdir = os.path.join(root, d)
dstdir = os.path.join(dst, sub, 'anatomical', time, newname)
shutil.copytree(srcdir, dstdir)
recstring = os.path.join(dstdir, newname + '-record.txt')
record = open(recstring, 'w')
record.write(srcdir)
if 'simon' in d or 'SIMON' in d or 'rapid' in d or 'RAPID' in d:
simoncount += 1
print "simon ", d
newname = exam + "-Simon-" + str(simoncount)
srcdir = os.path.join(root, d)
dstdir = os.path.join(dst, sub, 'functional/simon/', time, newname)
shutil.copytree(srcdir, dstdir)
recstring = os.path.join(dstdir, newname + '-record.txt')
record = open(recstring, 'w')
record.write(srcdir)
if 'affect' in d or 'AFFECT' in d:
affectcount += 1
print "affect ", d
newname = exam + "-Affect-" + str(affectcount)
srcdir = os.path.join(root, d)
dstdir = os.path.join(dst, sub, 'functional/affect/', time, newname)
shutil.copytree(srcdir, dstdir)
recstring = os.path.join(dstdir, newname + '-record.txt')
record = open(recstring, 'w')
record.write(srcdir)
def makedir(dirlist):
newpath = ''
newdir = ''
for d in dirlist:
newpath = os.path.join(newpath, d)
for d in dirlist[0:-1]:
newdir = os.path.join(newdir, d)
endpath = dirlist[-1]
if endpath in os.listdir(newdir):
pass
else:
os.mkdir(newpath)
exam1 = 'zzzzz'
exam2 = 'zzzzz'
exam3 = 'zzzzz'
exam4 = 'zzzzz'
for subj in csv.DictReader(lst, dialect='excel', delimiter='\t'):
keys = [key for key in subj.keys()]
print keys
if subj['Study No.'] == '1010' or subj['Study No.'] == '1029':
subj_id = subj['Study No.'] + '-' + subj['PPID']
makedir([dst, subj_id])
makedir([dst, subj_id, 'anatomical'])
makedir([dst, subj_id, 'functional'])
makedir([dst, subj_id, 'functional', 'affect'])
makedir([dst, subj_id, 'functional', 'simon'])
if subj['Exam 1']:
exam1 = subj['Exam 1']
copy_and_rename(subj_id, exam1, 'Time1')
if subj['Exam 2']:
exam2 = subj['Exam 2']
copy_and_rename(subj_id, exam2, 'Time2')
if subj['Exam 3']:
exam3 = subj['Exam 3']
copy_and_rename(subj_id, exam3, 'Time3')
if subj['Exam 4']:
exam3 = subj['Exam 4']
copy_and_rename(subj_id, exam3, 'Time4')
| [
"katherine@Katherines-MacBook-Pro.local"
] | katherine@Katherines-MacBook-Pro.local |
124e013441d004398f64e0732e3bf47043367432 | ce6271f3dc32cf374e4dde5e4666e80242e83fde | /grow/partials/partial_test.py | c9db7c2b496316826deeb59f55989568296bcb5e | [
"MIT"
] | permissive | kmcnellis/grow | 26ab42e051906a1aaa28e52aae585b5ed5c497a9 | 4787f5a01681ef0800e9b4388a56cdbc48209368 | refs/heads/master | 2020-04-18T09:44:35.950251 | 2019-01-24T22:05:06 | 2019-01-24T22:05:06 | 167,445,373 | 1 | 0 | MIT | 2019-01-24T22:07:44 | 2019-01-24T22:07:44 | null | UTF-8 | Python | false | false | 1,289 | py | """Test the pod partial."""
import unittest
from grow import storage
from grow.pods import pods
from grow.testing import testing
class PartialTestCase(unittest.TestCase):
"""Tests for partials."""
def setUp(self):
dir_path = testing.create_test_pod_dir()
self.pod = pods.Pod(dir_path, storage=storage.FileStorage)
def test_editor_config(self):
"""Test that editor configuration is read correctly."""
partials = self.pod.partials
partial = partials.get_partial('hero')
expected = {
'label': 'Hero',
'editor': {
'fields': [
{
'type': 'text',
'key': 'title',
'label': 'Hero Title'
}, {
'type': 'text',
'key': 'subtitle',
'label': 'Hero Subtitle'
},
{
'type': 'markdown',
'key': 'description',
'label': 'Description'
},
],
},
}
self.assertEquals(expected, partial.editor_config)
if __name__ == '__main__':
unittest.main()
| [
"randy@blinkk.com"
] | randy@blinkk.com |
972103bb17ed3d5a13dc49f753c7d97fdf963e30 | 705fa27fb794898a3ee52a4af8446d7ef8ea13f4 | /tests/constants/route_parser.py | 4d3a6dc8de7d6c0fb40031e61128aa7085a0f51e | [
"MIT"
] | permissive | Mause/pytransperth | 1612063a0c9276ca9b0ae7399b2e9d15598c5dc3 | 411c6a38b8451dc917927bdc4fdb70aeb9acd52b | refs/heads/master | 2022-07-20T00:24:39.252527 | 2014-06-19T07:23:43 | 2014-06-19T07:23:43 | 16,773,465 | 0 | 0 | MIT | 2022-07-06T19:27:24 | 2014-02-12T16:25:48 | Python | UTF-8 | Python | false | false | 1,628 | py | import os
from lxml.html import builder as E
from lxml.etree import HTML
PATH = os.path.dirname(__file__)
with open(os.path.join(PATH, 'header.html')) as fh:
HEADER = HTML(fh.read()).find('body/tr')
STEPS = E.HTML(
E.TD(
E.DIV(
E.TABLE('STEP1'),
E.TABLE('STEP2'),
E.TABLE('EXCLUDED')
)
)
)
STEP_BUS = E.HTML(
E.TR(
E.TD(
E.IMG(alt="bus")
),
E.TD(
E.SPAN('ONE'),
E.SPAN('TWO')
),
E.TD(
E.SPAN('THREE'),
E.SPAN('FOUR')
)
)
)
STEP_TRAIN = E.HTML(
E.TR(
E.TD(
E.IMG(alt="train")
),
E.TD(
E.SPAN('ONE'),
E.SPAN('TWO')
),
E.TD(
E.SPAN('THREE'),
E.SPAN('FOUR')
)
)
)
STEP_WALK = E.HTML(
E.TR(
E.TD(
E.IMG(alt="walk")
),
E.TD(
E.SPAN('ONE'),
E.SPAN('TWO')
),
E.TD(
E.SPAN('THREE'),
E.SPAN('FOUR')
)
)
)
STEP_INVALID = E.HTML(
E.TR(
E.TD(
E.IMG(alt="invalid")
)
)
)
with open(os.path.join(PATH, 'misc.html')) as fh:
MISC = HTML(fh.read()).xpath('//html/body/tr')[0]
IMG = E.IMG(
onclick="getFares('11/11/1111', 1111)"
)
LINKS = E.HTML(
E.DIV(
E.IMG('ONE'),
E.IMG('TWO')
)
)
DURATION = E.HTML(
E.SPAN(
E.SPAN('IGNORED'),
E.SPAN('11:11 hrs')
)
)
with open(os.path.join(PATH, 'routes.html')) as fh:
ROUTES = fh.read()
| [
"jack.thatch@gmail.com"
] | jack.thatch@gmail.com |
2be54c9eb80269c4b94a171aa565464b596c6fc1 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/240230d6f4f34654aed81c439a0398cd.py | 6166c2b7d7b980ccf4335bc4439fcae7d5daf5a5 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 491 | py | def hey(statement):
if is_silence(statement):
return 'Fine. Be that way!'
if is_yelling(statement):
return 'Whoa, chill out!'
if is_question(statement):
return 'Sure.'
else:
return 'Whatever.'
def is_silence(statement):
if statement.isspace() or not statement:
return True
def is_yelling(statement):
if statement.isupper():
return True
def is_question(statement):
if statement.endswith('?'):
return True
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
940ee3c9cce97e80e85a1dcc48bf6688e73046d2 | bd649f51496a24a55a2327e658f31d6e03e2f602 | /InvTL/lm_py/py/apigen/project.py | 0f9fe198fd2c4884e281894e55ad3d2e3a5b2efb | [
"MIT"
] | permissive | mickg10/DARLAB | 6507530231f749e8fc1647f3a9bec22a20bebe46 | 0cd8d094fcaf60a48a3b32f15e836fcb48d93e74 | refs/heads/master | 2020-04-15T20:39:23.403215 | 2019-01-10T06:54:50 | 2019-01-10T06:54:50 | 16,510,433 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | """ this contains the code that actually builds the pages using layout.py
building the docs happens in two passes: the first one takes care of
collecting contents and navigation items, the second builds the actual
HTML
"""
import py
from layout import LayoutPage
class Project(py.__.doc.confrest.Project):
""" a full project
this takes care of storing information on the first pass, and building
pages + indexes on the second
"""
def __init__(self):
self.content_items = {}
def add_item(self, path, content):
""" add a single item (page)
path is a (relative) path to the object, used for building links
and navigation
content is an instance of some py.xml.html item
"""
assert path not in self.content_items, 'duplicate path %s' % (path,)
self.content_items[path] = content
def build(self, outputpath):
""" convert the tree to actual HTML
uses the LayoutPage class below for each page and takes care of
building index documents for the root and each sub directory
"""
opath = py.path.local(outputpath)
opath.ensure(dir=True)
paths = self.content_items.keys()
paths.sort()
for path in paths:
# build the page using the LayoutPage class
page = self.Page(self, path, stylesheeturl=self.stylesheet)
page.contentspace.append(self.content_items[path])
ipath = opath.join(path)
if not ipath.dirpath().check():
# XXX create index.html(?)
ipath.ensure(file=True)
ipath.write(page.unicode().encode(self.encoding))
def process(self, txtpath):
""" this allows using the project from confrest """
# XXX not interesting yet, but who knows later (because of the
# cool nav)
if __name__ == '__main__':
# XXX just to have an idea of how to use this...
proj = Project()
here = py.path.local('.')
for fpath in here.visit():
if fpath.check(file=True):
proj.add_item(fpath, convert_to_html_somehow(fpath))
proj.build()
| [
"root@darlab1.mickg.net"
] | root@darlab1.mickg.net |
1b4e33e92c9ae5c3d39692435a98f799ea4c7cd9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03733/s769945008.py | 6b0d84553580197dcec9f0813ba6500ba2eaf682 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import sys
input = sys.stdin.readline
def main():
N, T = map(int, input().split())
t = list(map(int, input().split()))
ans = T * N
for i in range(N - 1):
diff = t[i + 1] - t[i]
if diff < T:
ans -= T - diff
print(ans)
if __name__ == "__main__":
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
107bfdfe9d53d39bb9018b86c818d6f9d0cfe11d | 325ad4c64a3353a41e505737430ba9e9c1005014 | /src/fava/core/accounts.py | 44d32ac792f97022409bef80f9c72e70b5bba94d | [
"MIT"
] | permissive | Linusp/fava | 8a83e8cf57c948a2b324e3d08d7d62f0566d4cdd | 790ff2fc7d46470ed9a3b9c7ab5e3a7b7d960459 | refs/heads/main | 2023-04-15T19:06:59.472054 | 2023-04-01T15:40:48 | 2023-04-01T15:40:48 | 345,133,428 | 0 | 0 | MIT | 2021-03-06T15:57:21 | 2021-03-06T15:57:20 | null | UTF-8 | Python | false | false | 5,440 | py | """Account close date and metadata."""
from __future__ import annotations
import datetime
from dataclasses import dataclass
from dataclasses import field
from typing import Dict
from beancount.core.account import TYPE as ACCOUNT_TYPE
from beancount.core.compare import hash_entry
from beancount.core.data import Balance
from beancount.core.data import Close
from beancount.core.data import Custom
from beancount.core.data import Directive
from beancount.core.data import get_entry
from beancount.core.data import Meta
from beancount.core.data import Pad
from beancount.core.data import Transaction
from beancount.core.data import TxnPosting
from beancount.core.realization import find_last_active_posting
from beancount.core.realization import get
from beancount.core.realization import RealAccount
from fava.core._compat import FLAG_UNREALIZED
from fava.core.conversion import units
from fava.core.module_base import FavaModule
def uptodate_status(real_account: RealAccount) -> str | None:
"""Status of the last balance or transaction.
Args:
account_name: An account name.
Returns:
A status string for the last balance or transaction of the account.
- 'green': A balance check that passed.
- 'red': A balance check that failed.
- 'yellow': Not a balance check.
"""
for txn_posting in reversed(real_account.txn_postings):
if isinstance(txn_posting, Balance):
if txn_posting.diff_amount:
return "red"
return "green"
if (
isinstance(txn_posting, TxnPosting)
and txn_posting.txn.flag != FLAG_UNREALIZED
):
return "yellow"
return None
def balance_string(real_account: RealAccount) -> str:
"""Balance directive for the given account for today."""
account = real_account.account
today = str(datetime.date.today())
res = ""
for pos in units(real_account.balance):
res += (
f"{today} balance {account:<28}"
+ f" {pos.units.number:>15} {pos.units.currency}\n"
)
return res
@dataclass
class LastEntry:
"""Date and hash of the last entry for an account."""
#: The entry date.
date: datetime.date
#: The entry hash.
entry_hash: str
@dataclass
class AccountData:
"""Holds information about an account."""
#: The date on which this account is closed (or datetime.date.max).
close_date: datetime.date = datetime.date.max
#: The metadata of the Open entry of this account.
meta: Meta = field(default_factory=dict)
#: Uptodate status. Is only computed if the account has a
#: "fava-uptodate-indication" meta attribute.
uptodate_status: str | None = None
#: Balance directive if this account has an uptodate status.
balance_string: str | None = None
#: The last entry of the account (unless it is a close Entry)
last_entry: LastEntry | None = None
class AccountDict(FavaModule, Dict[str, AccountData]):
"""Account info dictionary."""
EMPTY = AccountData()
def __missing__(self, key: str) -> AccountData:
return self.EMPTY
def setdefault(
self, key: str, _: AccountData | None = None
) -> AccountData:
if key not in self:
self[key] = AccountData()
return self[key]
def load_file(self) -> None:
self.clear()
all_root_account = self.ledger.all_root_account
for open_entry in self.ledger.all_entries_by_type.Open:
meta = open_entry.meta
account_data = self.setdefault(open_entry.account)
account_data.meta = meta
real_account = get(all_root_account, open_entry.account)
assert real_account is not None
last = find_last_active_posting(real_account.txn_postings)
if last is not None and not isinstance(last, Close):
entry = get_entry(last)
account_data.last_entry = LastEntry(
date=entry.date, entry_hash=hash_entry(entry)
)
if meta.get("fava-uptodate-indication"):
account_data.uptodate_status = uptodate_status(real_account)
if account_data.uptodate_status != "green":
account_data.balance_string = balance_string(real_account)
for close in self.ledger.all_entries_by_type.Close:
self.setdefault(close.account).close_date = close.date
def all_balance_directives(self) -> str:
"""Balance directives for all accounts."""
return "".join(
account_details.balance_string
for account_details in self.values()
if account_details.balance_string
)
def get_entry_accounts(entry: Directive) -> list[str]:
"""Accounts for an entry.
Args:
entry: An entry.
Returns:
A list with the entry's accounts ordered by priority: For
transactions the posting accounts are listed in reverse order.
"""
if isinstance(entry, Transaction):
return list(reversed([p.account for p in entry.postings]))
if isinstance(entry, Custom):
return [val.value for val in entry.values if val.dtype == ACCOUNT_TYPE]
if isinstance(entry, Pad):
return [entry.account, entry.source_account]
account_ = getattr(entry, "account", None)
if account_ is not None:
return [account_]
return []
| [
"mail@jakobschnitzer.de"
] | mail@jakobschnitzer.de |
bd8049a039c6d0f7f361f1e87b327a78b2d933fb | b75ee1f07fcc50142da444e8ae9ba195bf49977a | /codeowl/search.py | 4c7342035518afb07f0f7ebac759a97fa4dda76d | [
"Apache-2.0"
] | permissive | FlorianLudwig/code-owl | 369bdb57a66c0f06e07853326be685c177e2802a | be6518c89fb49ae600ee004504f9485f328e1090 | refs/heads/master | 2016-08-04T02:26:07.445016 | 2014-05-25T19:19:13 | 2014-05-25T19:19:13 | 18,918,361 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,644 | py | import copy
import os
from . import score
import codeowl.code
class Query(list):
pass
def generate_query(query_string):
tokens = codeowl.code.parse(query_string)
tokens = [token for token in tokens if not token.search_skip]
query = Query(tokens)
query.score_mali = 0
return query
def tokens(query, source_tokens, source_uri=None):
"""Search given tokens
:rtype: list[Result]
"""
matches = []
query_matches = []
for i, token in enumerate(source_tokens):
if query[0].match(token) >= 0:
# found new query start
query_matches.append(Result(query, source_uri))
for query_match in query_matches[:]:
if query_match.match(i, token):
matches.append(query_match)
query_matches.remove(query_match)
# filter double matches
match_pos = {}
for match in matches:
pos = match.matches[-1]
if pos in match_pos:
if match.diff <= match_pos[pos].diff:
match_pos[pos] = match
else:
match_pos[pos] = match
matches = match_pos.values()
# copy code into matches so we can generate snippets
# with highlighted code
for match in matches:
match.highlight_matches(source_tokens)
return matches
def path(query, source_path): # XXX go for generator
"""Search given path recursively
:rtype: list[Result]
"""
results = []
for dirpath, dirnames, filenames in os.walk(source_path):
for fname in filenames:
if fname.endswith('.py'):
results.extend(source_file(query, dirpath + '/' + fname))
results.sort(key=lambda r: r.diff)
return results
def source_file(query, file_path):
"""Search given file
:rtype: list[Result]
"""
code = codeowl.code.parse(open(file_path))
return tokens(query, code, file_path)
class Result(object):
def __init__(self, query, source_uri=None):
self.query = query
self.query_pos = 0
self.done = False
self.diff = 0
self.matches = []
self.source_uri = source_uri
def match(self, i, token):
diff = self.query[self.query_pos].match(token)
if diff != -1:
self.matches.append(i)
self.query_pos += 1
self.diff += diff
if self.query_pos == len(self.query):
self.done = True
return True
else:
self.diff += score.NON_MATCHING_TOKEN
return False
def highlight_matches(self, tokens):
self.tokens = tokens[:]
for match in self.matches:
token = copy.copy(self.tokens[match])
token.type = token.type.MATCH
self.tokens[match] = token
def code_snippet(self, start=None, end=None):
if start is None:
start = self.matches[0]
line_breaks = 0
while start > 0 and line_breaks < 2:
start -= 1
if self.tokens[start].value == '\n':
line_breaks += 1
start += 1 # we don't want to start with the found line break
elif start < 0:
start = len(self.tokens) - start + 1
if end is None:
end = self.matches[-1]
line_breaks = 0
while end < len(self.tokens) - 1 and line_breaks < 1:
end += 1
if self.tokens[end].value == '\n':
line_breaks += 1
elif end < 0:
end = len(self.tokens) - end + 1
# skip first line break
return self.tokens[start:end]
| [
"f.ludwig@greyrook.com"
] | f.ludwig@greyrook.com |
7f26da3a20c57f0e790d52199a3408bf6015437b | 4c1fea9e0f359b6c5ad33db64c6118f949ec254e | /pyroomacoustics/parameters.py | 27eae9dd2486de904c51044170425f605140ddc1 | [
"MIT"
] | permissive | vipchengrui/pyroomacoustics | 59bf42649787a1e2acb187050d524141af34b27c | 45b45febdf93340a55a719942f2daa9efbef9960 | refs/heads/master | 2020-12-01T08:48:03.395356 | 2019-12-10T08:58:18 | 2019-12-10T08:58:18 | 230,594,995 | 1 | 0 | MIT | 2019-12-28T10:31:56 | 2019-12-28T10:31:55 | null | UTF-8 | Python | false | false | 1,901 | py | # @version: 1.0 date: 09/07/2015 by Robin Scheibler
# @author: robin.scheibler@epfl.ch, ivan.dokmanic@epfl.ch, sidney.barthe@epfl.ch
# @copyright: EPFL-IC-LCAV 2015
'''
This file defines the main physical constants of the system
'''
# tolerance for computations
eps = 1e-10
# We implement the constants as a dictionnary so that they can
# be modified at runtime.
# The class Constants gives an interface to update the value of
# constants or add new ones.
_constants = {}
_constants_default = {
'c' : 343.0, # speed of sound at 20 C in dry air
'ffdist' : 10., # distance to the far field
'fc_hp' : 300., # cut-off frequency of standard high-pass filter
'frac_delay_length' : 81, # Length of the fractional delay filters used for RIR gen
}
class Constants:
'''
A class to provide easy access package wide to user settable constants.
Be careful of not using this in tight loops since it uses exceptions.
'''
def set(self, name, val):
# add constant to dictionnary
_constants[name] = val
def get(self, name):
try:
v = _constants[name]
except KeyError:
try:
v = _constants_default[name]
except KeyError:
raise NameError(name + ': no such constant')
return v
# the instanciation of the class
constants = Constants()
# Compute the speed of sound as a function
# of temperature, humidity, and pressure
def calculate_speed_of_sound(t, h, p):
'''
Compute the speed of sound as a function of
temperature, humidity and pressure
Parameters
----------
t:
temperature [Celsius]
h:
relative humidity [%]
p:
atmospheric pressure [kpa]
Returns
-------
Speed of sound in [m/s]
'''
# using crude approximation for now
return 331.4 + 0.6*t + 0.0124*h
| [
"fakufaku@gmail.com"
] | fakufaku@gmail.com |
d471b603fd6219e6ead621714e9324d5516486a3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/251/64527/submittedfiles/testes.py | 3feaf05985f19c0f1139cb33d4cea8996b379a3c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
def listaDigitos(n):
    """Return the decimal digits of n, most significant first ([] for n == 0)."""
    d = []
    while n > 0:
        d.insert(0, n % 10)
        n = n // 10
    return d

def somaQuadrados(d):
    """Return the sum of the squares of the numbers in the list d."""
    return sum(x ** 2 for x in d)

def feliz(n):
    """Return True when n is a happy number.

    Repeatedly replace n with the sum of the squares of its digits; happy
    numbers reach 1, every other number enters a cycle, which is detected
    with a set of previously seen values.

    Bug fix: the original compared n against its starting value *before*
    the first update, so it returned False for almost every input -- e.g.
    7, which is happy (7 -> 49 -> 97 -> 130 -> 10 -> 1).
    """
    vistos = set()
    while n != 1:
        if n in vistos:
            return False
        vistos.add(n)
        n = somaQuadrados(listaDigitos(n))
    return True
# Read the number to test and report whether it is a happy number.
n = int(input('Digite o numero: '))
print('Feliz' if feliz(n) else 'Infeliz')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
12b7c8751ab24c72909bae8e49624df6b22e9c01 | 0fa03797c72ea761206a9b9cb92e1303d9d7e1b1 | /Lesson 1 - Essentials/Chap05 - operations/boolean-working.py | 2f1ddb8e35a684b300ddb9c140c0b11704e0a47d | [] | no_license | denemorhun/Python-Reference-Guide | c2de64949a6cb315318b7b541460c51379705680 | 450c25435169311f068d9457fbc2897661d1d129 | refs/heads/main | 2023-05-08T21:49:46.217280 | 2021-03-27T12:03:17 | 2021-03-27T12:03:17 | 319,233,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Simple demonstrations of boolean logic and membership tests.
a = True
b = False
x = ( 'bear', 'bunny', 'tree', 'sky', 'rain' )
y = 'bear'
# Both flags must be set for the conjunction to hold.
verdict = 'expression is true' if a and b else 'expression is false'
print(verdict)
# A conditional expression selects the message from the flag b.
print("feed the bear" if b else "don't feed bear")
# Negative membership test against the tuple of sights.
if 'whale' not in x:
    print("There are no whales")
| [
"denemorhun@gmail.com"
] | denemorhun@gmail.com |
6f45e400bb39f0d48206588e6ae5cba2eac6d878 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5738606668808192_0/Python/etotheipi/c.py | d3d0b9335209254b7ceb8da367ac6c792805ab3b | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | import itertools
# precompute some primes
# Trial-division prime table: once the loop finishes, `primes` holds every
# prime below 10**6 and `numPrime` tracks len(primes). (Python 2 code.)
primes = [2, 3]
numPrime = 2
for n in xrange(5, 10 ** 6):
    # progress indicator every 10000 candidates
    if n % 10000 == 0: print n
    for i in xrange(numPrime):
        if n % primes[i] == 0:
            break
        if n < primes[i] ** 2:
            # no table prime <= sqrt(n) divides n, so n is prime
            break
    # re-test the prime the inner loop stopped on: divisor found -> skip n
    if n % primes[i] == 0:
        continue
    primes.append(n)
    numPrime += 1
def genPrime():
    # Yield the precomputed primes in increasing order.
    for i in xrange(numPrime):
        yield primes[i]
#todo
def factor(n):
    """Return the smallest prime factor of n found in the precomputed
    table, or n itself when no table prime up to sqrt(n) divides it."""
    for prime in genPrime():
        if n % prime == 0:
            return prime
        if n < prime * prime:
            break
    return n
def toBase(s, base):
    """Interpret the digit string s in the given base and return its value."""
    value = 0
    for ch in s:
        value = value * base + (ord(ch) - ord('0'))
    return value
# Google Code Jam "jamcoins": find J strings of N binary digits that start
# and end with '1' and are composite when read in every base 2..10, and
# write each with one nontrivial divisor per base. (Python 2 code.)
N = 16
J = 50
OUT = open('output.txt', 'w')
OUT.write('Case #1:\n')
for l in itertools.product(['0', '1'], repeat = N-2):
    s = '1' + ''.join(l) + '1'
    jamCoin = True
    factors = []
    for base in xrange(2, 11):
        x = toBase(s, base)
        factors.append(factor(x))
        # factor(x) == x means no small divisor was found: treat as prime
        if factors[-1] == x: # may have false negative, but we don't need to be tight
            jamCoin = False
            break
    if jamCoin:
        answer = s + ' ' + ' '.join(map(str,factors))
        OUT.write(answer + '\n')
        print answer
        J -= 1
        # stop once the required number of jamcoins has been emitted
        if J == 0: break
OUT.close() | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
7d9901b72f9aa589600f47acedc063c0bf0e2841 | 21a1ee76bbcaccf2155885d9b183009f15665057 | /lib/exabgp/application/cli.py | d9ba33ae1efe52fafa6ce9815c67ddac297f8cb5 | [] | no_license | Akheon23/exabgp | ebaabde663e0c564b83dd2ea837312dae8234a1b | 82348efd7faccdd0db027df3f1f7574f09f329df | refs/heads/master | 2021-01-17T05:20:12.328012 | 2015-05-21T12:20:40 | 2015-05-21T12:20:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,486 | py | #!/usr/bin/env python
# encoding: utf-8
"""
cli.py
Created by Thomas Mangin on 2014-12-22.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import sys
from exabgp.dep.cmd2 import cmd
from exabgp.version import version
class Completed (cmd.Cmd):
# use_rawinput = False
# prompt = ''
# doc_header = 'doc_header'
# misc_header = 'misc_header'
# undoc_header = 'undoc_header'
ruler = '-'
completion = {}
def __init__ (self, intro=''):
self.prompt = '%s> ' % intro
cmd.Cmd.__init__(self)
def completedefault (self, text, line, begidx, endidx): # pylint: disable=W0613
commands = line.split()
local = self.completion
for command in commands:
if command in local:
local = local[command]
continue
break
return [_ for _ in local.keys() if _.startswith(text)]
def default (self, line):
print 'unrecognised syntax: ', line
def do_EOF (self):
return True
class SubMenu (Completed):
	# Base class for nested menus: 'exit' (or the shortcut 'x') returns
	# True, which makes cmd.Cmd leave the sub-menu's command loop.
	def do_exit (self, _):
		return True
	do_x = do_exit
class Attribute (SubMenu):
chars = ''.join(chr(_) for _ in range(ord('a'),ord('z')+1) + range(ord('0'),ord('9')+1) + [ord ('-')])
attribute = None
completion = {
'origin': {
'igp': {
},
'egp': {
},
'incomplete': {
},
},
}
def __init__ (self, name):
self.name = name
SubMenu.__init__(self,'attribute %s' % name)
def do_origin (self, line):
if line in ('igp','egp','incomplete'):
self.attribute['origin'] = line
else:
print 'invalid origin'
def do_as_path (self, line):
pass
# next-hop
def do_med (self, line):
if not line.isdigit():
print 'invalid med, %s is not a number' % line
return
med = int(line)
if 0 > med < 65536:
print 'invalid med, %s is not a valid number' % line
self.attribute['origin'] = line
# local-preference
# atomic-aggregate
# aggregator
# community
# originator-id
# cluster-list
# extended-community
# psmi
# aigp
def do_show (self, _):
print 'attribute %s ' % self.name + ' '.join('%s %s' % (key,value) for key,value in self.attribute.iteritems())
class ExaBGP (Completed):
	# top-level command tree used by Completed for tab-completion
	completion = {
		'announce': {
			'route': {
			},
			'l2vpn': {
			},
		},
		'neighbor': {
			'include': {
			},
			'exclude': {
			},
			'reset': {
			},
			'list': {
			},
		},
		'attribute': {
		},
		'show': {
			'routes': {
				'extensive': {
				},
				'minimal': {
				},
			},
		},
		'reload': {
		},
		'restart': {
		},
	}
	def _update_prompt (self):
		# reflect the currently selected neighbors in the prompt
		if self._neighbors:
			self.prompt = '\n# neighbor ' + ', '.join(self._neighbors) + '\n> '
		else:
			self.prompt = '\n> '
	#
	# repeat last command
	#
	# last = 'help'
	# def do_last (self, line):
	# 	"Print the input, replacing '$out' with the output of the last shell command"
	# 	# Obviously not robust
	# 	if hasattr(self, 'last_output'):
	# 		print line.replace('$out', self.last_output)
	# set of neighbor IPs subsequent commands are restricted to
	_neighbors = set()
	def do_neighbor (self, line):
		# 'neighbor include <ip>' / 'exclude <ip>' / 'reset' / 'list'
		try:
			action,ip = line.split()
		except ValueError:
			# single-word forms land here: only 'reset' is valid
			if line == 'reset':
				print 'removed neighbors', ', '.join(self._neighbors)
				self._neighbors = set()
				self._update_prompt()
			else:
				print 'invalid syntax'
				self.help_neighbor()
			return
		if action == 'include':
			# check ip is an IP
			# check ip is a known IP
			self._neighbors.add(ip)
			self._update_prompt()
		elif action == 'exclude':
			if ip in self._neighbors:
				self._neighbors.remove(ip)
				print 'neighbor excluded'
				self._update_prompt()
			else:
				print 'invalid neighbor'
		elif action == 'list':
			# NOTE(review): a plain 'neighbor list' is one word and is
			# caught by the ValueError branch above, so this only runs for
			# 'list <extra>'; the message also says 'removed' although it
			# lists the current selection.
			print 'removed neighbors', ', '.join(self._neighbors)
		else:
			print 'invalid syntax'
			self.help_neighbor()
	def help_neighbor (self):
		print "neighbor include <ip>: limit the action to the defined neighbors"
		print "neighbor exclude <ip>: remove a particular neighbor"
		print "neighbor reset : clear the neighbor previous set "
	# per-name attribute dictionaries edited through the Attribute sub-menu
	_attribute = {}
	def do_attribute (self, name):
		# open the Attribute sub-menu for the given attribute name
		if not name:
			self.help_attribute()
			return
		invalid = ''.join([_ for _ in name if _ not in Attribute.chars])
		if invalid:
			print 'invalid character(s) in attribute name: %s' % invalid
			return
		cli = Attribute(name)
		cli.attribute = self._attribute.get(name,{})
		cli.cmdloop()
	def help_attribute (self):
		print 'attribute <name>'
	def do_quit (self, _):
		return True
	do_q = do_quit
def main():
	# With command-line arguments: run them as a single command and exit.
	# Without arguments: print the banner and start the interactive loop.
	if len(sys.argv) > 1:
		ExaBGP().onecmd(' '.join(sys.argv[1:]))
	else:
		print "ExaBGP %s CLI" % version
		ExaBGP('').cmdloop()
if __name__ == '__main__':
	main()
| [
"thomas.mangin@exa-networks.co.uk"
] | thomas.mangin@exa-networks.co.uk |
46488abc7063d86e4c425d68aba3da2da3a55acc | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2572/60716/251969.py | 20cd79bb0e897d3938298518c16ef8e0f7e9dba4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | def operation_0(a,b,c):
for j in range(a,b+1):
status[j] = c
def operation_1(a,b):
    # Count the distinct values stored in status[a..b] (inclusive).
    return len({status[j] for j in range(a, b + 1)})
# n cells (all initially colour 1), t read but unused here, m operations.
n,t,m = map(int,input().split())
status = list()
answer = list()
operations = list()
for i in range(n):
    status.append(1)
for i in range(m):
    strs = input().split()
    operations.append(strs)
    # print(strs)
    temp = strs.pop(0)
    lists = [int(i) for i in strs]
    # print(lists)
    # "C a b c": paint cells a..b (1-based in the input) with colour c
    if temp=="C":
        operation_0(lists[0]-1,lists[1]-1,lists[2])
    # "P a b": query the number of distinct colours in cells a..b
    if temp=="P":
        #print("ques")
        index = operation_1(lists[0]-1,lists[1]-1)
        answer.append(index)
# NOTE(review): leftover debugging branch -- dumps the whole input when
# exactly two queries both answered 2; it also raises IndexError when
# there are no queries at all.
if answer[0]==2 and answer[1]==2 and len(answer)==2 and len(operations)!=4:
    print("{} {} {}".format(n,t,m))
    print(operations)
    print(answer)
for i in range(len(answer)):
print(answer[i]) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
8b305145f1f8fac6152dfbcb76194780d4a2f4d4 | 3851d5eafcc5fd240a06a7d95a925518412cafa0 | /Django_Code/gs44/enroll/forms.py | 9723300889ca9bd1a908b5fbebafee0e4833352c | [] | no_license | Ikshansaleem/DjangoandRest | c0fafaecde13570ffd1d5f08019e04e1212cc2f3 | 0ccc620ca609b4ab99a9efa650b5893ba65de3c5 | refs/heads/master | 2023-01-31T04:37:57.746016 | 2020-12-10T06:27:24 | 2020-12-10T06:27:24 | 320,180,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django.core import validators
from django import forms
class StudentRegistration(forms.Form):
    """Sign-up form that verifies the two password fields match."""
    name = forms.CharField()
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput)
    rpassword = forms.CharField(label='Password(again)', widget=forms.PasswordInput)
    def clean(self):
        cleaned_data = super().clean()
        # Use .get(): indexing self.cleaned_data raised KeyError whenever a
        # password field had already failed its own field validation.
        valpwd = cleaned_data.get('password')
        valrpwd = cleaned_data.get('rpassword')
        if valpwd and valrpwd and valpwd != valrpwd:
            raise forms.ValidationError('Password does not match')
        # Django expects clean() to return the cleaned data.
        return cleaned_data
| [
"ikshan3108@gmail.com"
] | ikshan3108@gmail.com |
b333ae9330a3f75aac01dbd5d090d9df9f977761 | 41dbb27af3a3ecabeb06e2fb45b3440bcc9d2b75 | /reglog/migrations/0013_auto_20201228_1133.py | 54c21dd73e77c2641e8e37b4021bcbb5fdcc2cdb | [] | no_license | joypaulgmail/Dookan | 4df83f37b7bcaff9052d5a09854d0bb344b9f05a | 7febf471dd71cc6ce7ffabce134e1e37a11309f7 | refs/heads/main | 2023-03-02T04:10:19.611371 | 2021-02-09T11:45:32 | 2021-02-09T11:45:32 | 336,476,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | # Generated by Django 3.1 on 2020-12-28 06:03
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the Product.book field to
    # Product.booking without touching the stored data.
    dependencies = [
        ('reglog', '0012_product_book'),
    ]
    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='book',
            new_name='booking',
        ),
    ]
| [
"joypaul650@gmail.com"
] | joypaul650@gmail.com |
29d744e1a5e2cdd0bec25f5aec42161b5545172f | 886e43d26c08a9eb837f58d1ba1e9185815eced0 | /demos/setup_test.py | 2eb3380d094d17baebf9bc4e16c34f83eefe1b84 | [] | no_license | amaork/PyAppFramework | ca48c08d1d72430538a9b497e0641e7077a7c560 | c75ef175cb7f2a3fc6a3b7709ea07f86c5a7ba1e | refs/heads/master | 2023-09-01T13:39:38.667126 | 2023-08-30T09:49:47 | 2023-08-30T09:49:47 | 46,108,011 | 14 | 2 | null | 2018-06-11T07:21:34 | 2015-11-13T07:55:20 | Python | UTF-8 | Python | false | false | 332 | py | # -*- coding: utf-8 -*-
from ..misc.setup import get_git_commit_count, get_git_release_hash, get_git_release_date
if __name__ == "__main__":
    # Print the git-derived build information of this checkout.
    print("Commit count:{0:d}".format(get_git_commit_count()))
    print("Release hash:{0:s}".format(get_git_release_hash()))
    # NOTE(review): the label "Release data" looks like a typo for
    # "Release date"; the output string is left unchanged here.
    print("Release data:{0:s}".format(get_git_release_date()))
| [
"amaork@gmail.com"
] | amaork@gmail.com |
c16ad00c9701e0cd53eee99c2d7c654023106bb1 | 85973bb901b69bf6fba310d18602bfb86d654b20 | /zjh/gen_hotpatch_zjh.py | ac9e7a644a2cd9b53aafc7b04a2b4edd4124d42a | [] | no_license | nneesshh/minguo-client | 35d1bb530f2099e4674919dc47a1c47c28f861d3 | d9e79b22388b98834c45a8a856a3d5ea85dd6ece | refs/heads/master | 2021-06-30T15:36:44.432882 | 2021-02-01T08:48:04 | 2021-02-01T08:48:04 | 184,216,552 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,653 | py | #!/usr/bin/env python
#coding:utf-8
import os
import json
import hashlib
import subprocess
# Directories to scan for patchable assets, and directory names to skip
# while scanning.
assetsDir = {
    "searchDir" : ["src/app/game/zjh", "res/game/public", "res/game/zjh"],
    "ignoreDir" : ["cocos", "obj", "patch"]
}
versionConfigFile = "res/patch/zjh_version_info.json" # path of the version info config file
versionManifestPath = "res/patch/zjh/version.manifest" # version.manifest generated by this script
projectManifestPath = "res/patch/zjh/project.manifest" # project.manifest generated by this script
class SearchFile:
    def __init__(self):
        # Collect every file found under each configured search directory.
        self.fileList = []
        for k in assetsDir:
            if (k == "searchDir"):
                for searchdire in assetsDir[k]:
                    self.recursiveDir(searchdire)
    def recursiveDir(self, srcPath):
        ''' Recursively collect all files under the given directory.'''
        dirList = [] # sub-directories still to visit
        files = os.listdir(srcPath) # files and directories directly under srcPath
        for f in files:
            # directory handling
            if (os.path.isdir(srcPath + '/' + f)):
                if (f[0] == '.' or (f in assetsDir["ignoreDir"])):
                    # skip hidden folders and explicitly ignored directories
                    pass
                else:
                    # remember this folder so it gets visited below
                    dirList.append(f)
            # file handling
            elif (os.path.isfile(srcPath + '/' + f)):
                self.fileList.append(srcPath + '/' + f) # record the file
        # recurse into every retained sub-directory
        for dire in dirList:
            # visit the files of this sub-directory
            self.recursiveDir(srcPath + '/' + dire)
    def getAllFile(self):
        ''' Return every collected file path as a tuple.'''
        return tuple(self.fileList)
def getSvnCurrentVersion():
    # Run `svn info` and return the value of its "Revision" line,
    # or "" when no such line is found.
    popen = subprocess.Popen(['svn', 'info'], stdout = subprocess.PIPE)
    while True:
        next_line = popen.stdout.readline()
        # NOTE(review): on Python 3 readline() returns bytes, so the ''
        # comparison and split(':') below would misbehave; this script
        # appears to target Python 2 -- confirm before porting.
        if next_line == '' and popen.poll() != None:
            break
        valList = next_line.split(':')
        if len(valList)<2:
            continue
        valList[0] = valList[0].strip().lstrip().rstrip(' ')
        valList[1] = valList[1].strip().lstrip().rstrip(' ')
        if(valList[0]=="Revision"):
            return valList[1]
    return ""
def calcMD5(filepath):
    """Return the hex MD5 digest of the file at *filepath*."""
    with open(filepath, 'rb') as handle:
        content = handle.read()
    return hashlib.md5(content).hexdigest()
def getVersionInfo():
    '''Load and return the version config data as a dict.'''
    # Context manager guarantees the handle is closed even when json.load
    # raises on malformed content (the original left it open in that case).
    with open(versionConfigFile, "r") as configFile:
        json_data = json.load(configFile)
    #json_data["version"] = json_data["version"] + '.' + str(getSvnCurrentVersion())
    return json_data
def genVersionManifestPath():
    '''Generate the top-level version.manifest for the big version.'''
    json_str = json.dumps(getVersionInfo(), indent = 2)
    # with-statement closes the manifest even if the write fails
    with open(versionManifestPath, "w") as fo:
        fo.write(json_str)
def genProjectManifestPath():
    '''Generate project.manifest: the version info plus an md5 per asset.'''
    searchfile = SearchFile()
    fileList = list(searchfile.getAllFile())
    project_str = {}
    project_str.update(getVersionInfo())
    # map each asset path to its md5 so the client can detect changed files
    dataDic = {f: {"md5": calcMD5(f)} for f in fileList}
    project_str.update({"assets": dataDic})
    json_str = json.dumps(project_str, sort_keys = True, indent = 2)
    # with-statement closes the manifest even if the write fails
    with open(projectManifestPath, "w") as fo:
        fo.write(json_str)
if __name__ == "__main__":
genVersionManifestPath()
genProjectManifestPath() | [
"unknown@example.com"
] | unknown@example.com |
8dd5d8f19fa5072e9120188d4f166ce23711b167 | 1f1ba16082e752c55271d4eac7a4b574ecacb94b | /rule-lists-python-package/rulelist/rulelistmodel/gaussianmodel/mdl_gaussian.py | 00417a145285071c215ba60df5d2ccfe60eb4aca | [
"MIT"
] | permissive | HMProenca/robust-rules-for-prediction-and-description | 2a7bab373d16f028709ce8deea4ebc6838b838ff | 236086566f853050a909fb4995c97909174cf074 | refs/heads/main | 2023-04-10T10:19:10.408602 | 2021-08-30T15:25:43 | 2021-08-30T15:25:43 | 401,391,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,065 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 18:09:04 2020
@author: gathu
"""
import math
from math import pi, log2
from numpy import inf
from rulelist.mdl.mdl_base_codes import log2_gamma_half
from rulelist.rulelistmodel.gaussianmodel.gaussianstatistic import GaussianFixedStatistic
def gaussian_bayesian_encoding(n: int,variance : float,log_gamma_n: float):
    """ Bayesian code length of a single numeric target with mean and variance unknown.

    log_gamma_n : float
        The appropriate value of the gamma function for the given n; for the
        Bayesian encoding of the paper it is log2( Gamma(n/2) ).
    """
    # Fewer than two points, or zero spread, cannot be encoded: infinite cost.
    if n < 2 or variance == 0:
        return inf
    half_n = n / 2
    return (1 + half_n * log2(pi) - log_gamma_n
            + 0.5 * log2(n + 1) + half_n * log2(n * variance))
def gaussian_fixed_encoding(n: int, rss: float, variance: float):
    """ Code length of a single numeric target under a fixed-mean, fixed-variance Gaussian.

    rss : float
        Residual Sum of Squares around the fixed mean.
    variance: float
        Fixed variance of the Gaussian distribution.
    """
    # A degenerate (zero-variance) Gaussian cannot encode the data.
    if variance == 0:
        return inf
    log2_e = 1.4426950408889634  # log2(e): converts the nat-based residual term to bits
    model_term = 0.5 * n * log2(2 * pi * variance)
    residual_term = 0.5 * log2_e * rss / variance
    return model_term + residual_term
def length_rule_free_gaussian(rulelist : classmethod, statistics : classmethod):
    """ Computes alpha_gain of adding one rule that does not have fixed statistics.
    """
    # NOTE(review): `any(statistics.variance) == 0` compares the boolean
    # result of any() with 0, i.e. it is True only when *every* variance is
    # falsy. If the intent is "some variance is zero", this should read
    # any(v == 0 for v in statistics.variance) -- confirm before changing.
    if any(statistics.variance) == 0 or statistics.usage <= 2:
        codelength = inf
    else:
        loggamma_usg = log2_gamma_half(statistics.usage)
        loggamma_2 = log2_gamma_half(2)
        number_of_targets = len(statistics.mean)
        # Bayesian cost over all points, minus the 2-point Bayesian cost,
        # plus the non-optimal 2-point cost under the dataset statistics.
        l_bayesian_all = sum([gaussian_bayesian_encoding(statistics.usage, statistics.variance[nt], loggamma_usg)
                              for nt in range(number_of_targets)])
        l_bayesian_2 = sum([gaussian_bayesian_encoding(2, statistics.variance_2points[nt], loggamma_2)
                            for nt in range(number_of_targets)])
        if l_bayesian_2 == inf : raise Exception('l_bayesian_2 value is wrong: 2 closest points are possible wrong')
        l_nonoptimal_2 = sum([gaussian_fixed_encoding(2, statistics.rss_2dataset[nt],
                                                      statistics.variance_dataset[nt])
                              for nt in range(number_of_targets)])
        if l_nonoptimal_2 == inf : raise Exception('l_nonoptimal_2 value is wrong')
        codelength = l_bayesian_all - l_bayesian_2 + l_nonoptimal_2
    return codelength
def length_rule_fixed_gaussian(rulelist : classmethod, statistics : GaussianFixedStatistic):
    """ Total fixed-Gaussian code length of one rule: one encoding term per target.
    """
    number_of_targets = len(statistics.mean)
    return sum(gaussian_fixed_encoding(statistics.usage, statistics.rss[target], statistics.variance[target])
               for target in range(number_of_targets))
| [
"hugo.manuel.proenca@gmail.com"
] | hugo.manuel.proenca@gmail.com |
9522fa97499190ed7a1c7d7eac77f38c11cdf9ba | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02842/s106976287.py | f68eb41598fe68ec1ba6284e9c8a78c45cc4ca09 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import sys
from collections import deque
import numpy as np
import math
sys.setrecursionlimit(10**6)
# stdin helpers: S -> one line as str, SL -> str tokens, I -> one int,
# IL -> int tokens (SL/IL return lazy map iterators).
def S(): return sys.stdin.readline().rstrip()
def SL(): return map(str,sys.stdin.readline().rstrip().split())
def I(): return int(sys.stdin.readline().rstrip())
def IL(): return map(int,sys.stdin.readline().rstrip().split())
def solve():
    # Invert the 8% tax: the smallest pre-tax price f whose taxed value
    # floor(f * 1.08) equals n is ceil(n / 1.08). If even that candidate
    # does not round back to n, no valid pre-tax price exists.
    f = math.ceil(n/1.08)
    if math.floor(f*1.08)==n:
        print(f)
    else:
        print(':(')
    return
if __name__=='__main__':
n = I()
solve() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cfcd23b00235f894bb4ae4381726a46b24aaeefc | a70c29d384933040d318a1baf952965621b68490 | /serving/flask/tf/tf_request.py | 474ed0d17f0bcd56f2b284a0670d3904895d2493 | [
"MIT"
] | permissive | romadm/LibRecommender | f4980dcd117997284f96f7b042cf3fbbc8c0f99e | 46bb892453e88d8411e671bd72e7a8c6e8ef1575 | refs/heads/master | 2023-08-27T11:51:21.842980 | 2021-11-07T01:42:35 | 2021-11-07T01:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | import argparse
import json
import requests
from serving.flask import colorize
def str2bool(v):
    """Coerce a CLI flag value to bool; reject unrecognised strings."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "y", "1"):
        return True
    if lowered in ("no", "false", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean liked value expected...")
def parse_args():
    """Build and parse the CLI options for the recommendation request."""
    parser = argparse.ArgumentParser(description="request")
    parser.add_argument("--user", type=str, help="user index")
    parser.add_argument("--host", default="localhost")
    parser.add_argument("--n_rec", type=int, default=10,
                        help="num of recommendations")
    parser.add_argument("--port", default=5000, help="port")
    parser.add_argument("--algo", default="tf", type=str)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args() # http://127.0.0.1:5000/predict
    # POST the user/n_rec payload to the serving endpoint and pretty-print
    # the JSON response in green.
    url = f"http://{args.host}:{args.port}/{args.algo}/recommend"
    data = {"user": args.user, "n_rec": args.n_rec}
    try:
        # NOTE(review): json=json.dumps(data) double-encodes the payload
        # (json= already serialises); presumably the Flask server expects a
        # JSON *string* body -- verify against the server code.
        response = requests.post(url, json=json.dumps(data))
        response_str = f"request_json: {response.json()}"
        print(f"{colorize(response_str, 'green', bold=True)}")
    except TypeError:
        print("Could not serialize to json format...")
    except json.JSONDecodeError:
        print("Can't print response as json format...")
# python tf_request.py --user 1 --n_rec 10
| [
"wdmjjxg@163.com"
] | wdmjjxg@163.com |
bc19d8a3891a523ef3bdb6d9b253b313aedfeebb | a543a24f1b5aebf500c2200cd1d139435948500d | /Book/Ant/1-1/main.py | d7a1e9bc8e75faad0420ddae20bcf50962166c7a | [] | no_license | HomeSox/AtCoder | 18c89660762c3e0979596f0bcc9918c8962e4abb | 93e5ffab02ae1f763682aecb032c4f6f4e4b5588 | refs/heads/main | 2023-09-05T03:39:34.591433 | 2023-09-04T13:53:36 | 2023-09-04T13:53:36 | 219,873,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | n = int(input())
# Test whether any four elements of k (indices may repeat) sum to m by
# brute force over all n**4 index combinations -- fine for small n.
m = int(input())
k = list(map(int, input().split()))
answer = 'No'
for i1 in range(n):
    for i2 in range(n):
        for i3 in range(n):
            for i4 in range(n):
                if k[i1] + k[i2] + k[i3] + k[i4] == m:
                    answer = 'Yes'
print(answer)
"satory074@gmail.com"
] | satory074@gmail.com |
7a47391ef75e0091b09a665cea3a1fa44799edd1 | 435a7f571e6379e79010a7bbe2f9680a30b43ed8 | /src/blog/urls.py | aab2dc457187be27ddc92670e6b4200c5b054c62 | [] | no_license | Thuglifescientist2018/eastline_django2020 | 3b1c4bc6800fbdd9206dbdd076e1daf6d4228315 | e61882d91189ca8e0bfea42a715eeb2e87253dd2 | refs/heads/master | 2021-10-12T06:31:35.891679 | 2020-03-20T07:45:15 | 2020-03-20T07:45:15 | 248,694,105 | 0 | 0 | null | 2021-09-22T18:51:40 | 2020-03-20T07:31:34 | Python | UTF-8 | Python | false | false | 355 | py | from django.urls import path
from .views import blog_home, blog_list, blog_create, blog_render, blog_update, blog_delete
# Blog app routes. NOTE: the literal routes ("list", "create") are
# registered before the "<str:slug>" catch-all; Django matches in order,
# so the slug route would otherwise shadow them.
urlpatterns = [
    path("", blog_home),
    path("list", blog_list),
    path("create", blog_create),
    path("<str:slug>", blog_render),
    path("<str:slug>/update", blog_update),
    path("<str:slug>/delete", blog_delete)
] | [
"="
] | = |
8f21050531f4ad8e6d54d613afbbd6bf2eb37d5a | 0a14b78c83ca1d9f7465aed9b978101710750e4f | /task-urgency/sum-delta-task-urgency.py | f850e9fb1f72409a5a3a9b2cd73effe88d90bb92 | [] | no_license | writefaruq/EpuckExptAnalysisScripts | 8cdd5b8d8c584ed6265b792b81f490af27a69f14 | f6e45b4f181dfca629598a17decb94595877a577 | refs/heads/master | 2020-05-31T17:17:15.872463 | 2010-05-18T17:23:11 | 2010-05-18T17:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | #!/usr/bin/env python
import time
import sys
import os
import fnmatch
import fileinput
#INTERVAL = 50
HEADER_LINE = 2
STEP_TIME = 5
def sum_urgency(infile, outfile):
    # Accumulate the urgency column of a delta file over fixed windows of
    # INTERVAL seconds (INTERVAL / STEP_TIME simulation steps) and write
    # one "time_end;iteration;sum" line per completed window to outfile.
    # (Python 2 code; INTERVAL is a module global set by __main__.)
    time_start = 0
    time_end = 0
    cum_urgency = 0
    iter = 1
    f = open(outfile, 'w')
    try:
        for line in fileinput.input(infile):
            # skip blank lines and the header block
            if line == '\n' or fileinput.lineno() <= HEADER_LINE:
                continue
            print "line # : ", fileinput.lineno()
            # columns: timestamp;step;urgency
            ts = line.split(";")[0]
            step = line.split(";")[1]
            u = line.split(";")[2]
            print u
            if fileinput.lineno() == HEADER_LINE + 1:
                # first data line: initialise the first window
                #expt_begin = float(ts)
                #time_start = float(ts)
                #time_end = time_start + INTERVAL
                time_start = int(step)
                time_end = time_start + INTERVAL / STEP_TIME
                cum_urgency = float(u)
                continue
            #if float(ts) <= time_end:
            if int(step) <= time_end:
                cum_urgency += float(u)
            else:
                # window complete: emit it and start the next one
                print "Cumulative urgency:%f at iter %d" %(cum_urgency, iter)
                outline = str(time_end) + ";" + str(iter)\
                    + ";" + str(cum_urgency) + "\n"
                iter += 1
                cum_urgency = 0
                #time_end = float(ts) + INTERVAL
                time_end = time_end + INTERVAL / STEP_TIME
                # NOTE(review): lineno() == HEADER_LINE + 1 was already
                # handled above, so this condition never holds here and
                # the outline is always written.
                if fileinput.lineno() == HEADER_LINE + 1: # skip first line
                    continue
                else:
                    f.write(outline)
    except Exception, e:
        print e
    fileinput.close()
    f.close()
if __name__ == '__main__':
    # usage: script.py <delta-dir> <interval-in-seconds>
    numargs = len(sys.argv)
    if numargs < 3 or numargs > 3:
        print "Usage: %s <delta-dir> <interval>" %sys.argv[0]
        sys.exit(1)
    else:
        dir_path = sys.argv[1]
        # module global read by sum_urgency()
        INTERVAL = int(sys.argv[2])
    # process every Delta*.txt file found in the given directory
    for file in os.listdir(dir_path):
        if fnmatch.fnmatch(file, 'Delta*.txt'):
            print "Parsing: ", file
            outfile = "SumOver" + str(INTERVAL) + "sec-" + file
            infile = dir_path + '/' + file
            sum_urgency(infile, outfile)
| [
"Mdomarfaruque.Sarker@newport.ac.uk"
] | Mdomarfaruque.Sarker@newport.ac.uk |
897053339598d7c7ac10cf386fc1e4bd52a9034e | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4052/884004052.py | 98b4f26cc0fbf94d2be466c0ceaf4eca65740322 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 620 | py | from bots.botsconfig import *
from records004052 import recorddefs
# ISA envelope / functional-group settings for this bots EDI grammar.
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'MF',
}
# Segment tree of the transaction set: each entry gives the segment ID
# with its min/max repeat counts; LEVEL nests child segments. ID/MIN/MAX/
# LEVEL come from the bots.botsconfig star import above.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BMP', MIN: 1, MAX: 1},
    {ID: 'N1', MIN: 1, MAX: 5},
    {ID: 'G61', MIN: 0, MAX: 3},
    {ID: 'NTE', MIN: 0, MAX: 10},
    {ID: 'QTY', MIN: 0, MAX: 1},
    {ID: 'BAL', MIN: 0, MAX: 1},
    {ID: 'N9', MIN: 0, MAX: 999, LEVEL: [
        {ID: 'AMT', MIN: 0, MAX: 1},
        {ID: 'N1', MIN: 0, MAX: 1},
    ]},
    {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
cf7190285977a6d072f2878e18cecc2e23781a05 | de40d3fa8d8af0030556d27d6833f6a1a0e7700c | /baekjoon/1551py/a.py | 4b2f1e5ea15cf88fcabae6827199ab542dd4806e | [] | no_license | NeoMindStd/CodingLife | cd6a627209c0353f4855f09fd5dfef8da4bbfef6 | bcb6c3752f472e6a4f3b8f158d02bc3599dfcda3 | refs/heads/master | 2022-12-24T10:42:45.390085 | 2022-12-11T16:27:16 | 2022-12-11T16:27:16 | 191,797,634 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | n, k = map(int, input().split())
# Double-buffered finite differences: a[i%2] holds the current sequence
# and a[(i+1)%2] receives its consecutive differences. After k passes the
# k-th finite difference of the comma-separated input remains.
a = [list(map(int, input().split(','))),[]]
for i in range(k):
    a[(i+1)%2].clear()
    for j in range(n-i-1): a[(i+1)%2].append(a[i%2][j+1]-a[i%2][j])
print(*a[k%2], sep=',')
| [
"dwj1996@naver.com"
] | dwj1996@naver.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.