hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
909cbf44eb040c711d3814dcb9d6dc256f0a5ada | 242 | py | Python | main.py | cbhua/tool-bin2png | ed071c284d0d6a6c0145ea0dae0d9d3ad07aadbe | [
"MIT"
] | 3 | 2021-03-12T16:31:26.000Z | 2021-05-28T10:11:55.000Z | main.py | cbhua/tool-bin2png | ed071c284d0d6a6c0145ea0dae0d9d3ad07aadbe | [
"MIT"
] | null | null | null | main.py | cbhua/tool-bin2png | ed071c284d0d6a6c0145ea0dae0d9d3ad07aadbe | [
"MIT"
] | null | null | null | import os
from src.bin2png import bin2png
for root, dirs, files in os.walk('./bin'):
for file in files:
if not file.endswith('.bin'):
continue
bin2png(os.path.join(root, file), './png')
print('DONE') | 22 | 50 | 0.582645 | import os
from src.bin2png import bin2png
for root, dirs, files in os.walk('./bin'):
for file in files:
if not file.endswith('.bin'):
continue
bin2png(os.path.join(root, file), './png')
print('DONE') | 0 | 0 | 0 |
3d766f0dff6f8063405f5b2726577d753b3ee1d9 | 3,804 | py | Python | python/test/amstart/parse_log.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | 2 | 2017-06-07T03:20:42.000Z | 2020-01-07T09:14:26.000Z | python/test/amstart/parse_log.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | null | null | null | python/test/amstart/parse_log.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
from datetime import datetime
import matplotlib.pyplot as plt
logfile_live = "./android-live.logcat"
logfile_app = "./android-app.logcat"
TIMEFORMAT = "\d{2}-\d{2} (?P<datetime>\d{2}:\d{2}:\d{2}\.\d{3})"
re_keyevent = re.compile(TIMEFORMAT + ".*action=0x0, flags=0x8, keyCode=21.*")
re_l_pause = re.compile(TIMEFORMAT + ".*am_pause_activity.*Launcher\]$")
re_s_new_intent = re.compile(TIMEFORMAT + ".*am_new_intent.*com\.stv\.signalsourcemanager.*\]$")
re_l_on_pause = re.compile(TIMEFORMAT + ".*am_on_paused_called.*Launcher\]$")
re_s_resume = re.compile(TIMEFORMAT + ".*am_resume_activity.*com\.stv\.signalsourcemanager/\.MainActivity]$")
re_s_on_resume = re.compile(TIMEFORMAT + ".*am_on_resume_called.*com\.stv\.signalsourcemanager\.MainActivity]$")
re_s_draw_ok = re.compile(TIMEFORMAT + ".*ActivityManager: Draw ok$")
x_axes = [0, 1, 2, 3, 4, 5, 6]
x_descr = ['Input', 'L.pause', 'S.new_inent', 'L.on_pause', 'S.resume', 'S.on_resume', 'S.Draw']
colors = ['r', 'b', 'y', 'g', 'k', 'c', 'm']
plt.figure()
plt.xlim(0.0, 7)
plt.ylim(0.0, 1500)
for i, time_sample in enumerate(parser(logfile_live)):
ticks = cal_time_ticks(time_sample)
if i >= len(colors):
break
s = "sample{}".format(i)
plt.plot(x_axes, ticks, color=colors[i], linewidth=4.5, linestyle="-", label=s)
plt.xticks(x_axes, x_descr, rotation=30)
plt.xlabel("EventStatus")
plt.ylabel("Time(ms)")
plt.title("Start Activity Time")
plt.grid(True)
plt.legend(loc='upper left')
plt.show()
| 37.294118 | 112 | 0.588591 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
from datetime import datetime
import matplotlib.pyplot as plt
logfile_live = "./android-live.logcat"
logfile_app = "./android-app.logcat"
TIMEFORMAT = "\d{2}-\d{2} (?P<datetime>\d{2}:\d{2}:\d{2}\.\d{3})"
re_keyevent = re.compile(TIMEFORMAT + ".*action=0x0, flags=0x8, keyCode=21.*")
re_l_pause = re.compile(TIMEFORMAT + ".*am_pause_activity.*Launcher\]$")
re_s_new_intent = re.compile(TIMEFORMAT + ".*am_new_intent.*com\.stv\.signalsourcemanager.*\]$")
re_l_on_pause = re.compile(TIMEFORMAT + ".*am_on_paused_called.*Launcher\]$")
re_s_resume = re.compile(TIMEFORMAT + ".*am_resume_activity.*com\.stv\.signalsourcemanager/\.MainActivity]$")
re_s_on_resume = re.compile(TIMEFORMAT + ".*am_on_resume_called.*com\.stv\.signalsourcemanager\.MainActivity]$")
re_s_draw_ok = re.compile(TIMEFORMAT + ".*ActivityManager: Draw ok$")
def parser(file):
all_times=[]
try:
with open(file, 'rt') as lfile:
for line in lfile:
keyevent_ret = re_keyevent.search(line)
if keyevent_ret is None:
continue
time_sample=[]
time_sample.append(keyevent_ret.groupdict()['datetime'])
for line in lfile:
l_pause_ret = re_l_pause.search(line)
if l_pause_ret is not None:
time_sample.append(l_pause_ret.groupdict()['datetime'])
s_new_intent_ret = re_s_new_intent.search(line)
if s_new_intent_ret is not None:
time_sample.append(s_new_intent_ret.groupdict()['datetime'])
l_on_pause_ret = re_l_on_pause.search(line)
if l_on_pause_ret is not None:
time_sample.append(l_on_pause_ret.groupdict()['datetime'])
s_resume_ret = re_s_resume.search(line)
if s_resume_ret is not None:
time_sample.append(s_resume_ret.groupdict()['datetime'])
s_on_resume_ret = re_s_on_resume.search(line)
if s_on_resume_ret is not None:
time_sample.append(s_on_resume_ret.groupdict()['datetime'])
s_draw_ok_ret = re_s_draw_ok.search(line)
if s_draw_ok_ret is not None:
time_sample.append(s_draw_ok_ret .groupdict()['datetime'])
if len(time_sample) == 7:
all_times.append(time_sample)
break
# print(time_sample)
# print(all_times)
except IOError as err:
print("File Err: " + str(err))
return all_times
def cal_time_ticks(time_sample):
ticks=[0]
basetime = datetime.strptime(str(time_sample[0]),"%H:%M:%S.%f")
for i, s_time in enumerate(time_sample):
if i == 0:
continue
dt = datetime.strptime(str(s_time),"%H:%M:%S.%f")
diff = dt - basetime
ticks.append(diff.seconds * 1000 + diff.microseconds/1000)
return ticks
x_axes = [0, 1, 2, 3, 4, 5, 6]
x_descr = ['Input', 'L.pause', 'S.new_inent', 'L.on_pause', 'S.resume', 'S.on_resume', 'S.Draw']
colors = ['r', 'b', 'y', 'g', 'k', 'c', 'm']
plt.figure()
plt.xlim(0.0, 7)
plt.ylim(0.0, 1500)
for i, time_sample in enumerate(parser(logfile_live)):
ticks = cal_time_ticks(time_sample)
if i >= len(colors):
break
s = "sample{}".format(i)
plt.plot(x_axes, ticks, color=colors[i], linewidth=4.5, linestyle="-", label=s)
plt.xticks(x_axes, x_descr, rotation=30)
plt.xlabel("EventStatus")
plt.ylabel("Time(ms)")
plt.title("Start Activity Time")
plt.grid(True)
plt.legend(loc='upper left')
plt.show()
| 2,222 | 0 | 46 |
817ad7c0e77858d667fc47d3f904ab4b601aff90 | 2,239 | py | Python | examples/ds18b20/main.py | MAH1987/microhomie | e56fda36837e1745402e4c0ffb1642e01dfd3dea | [
"MIT"
] | 61 | 2018-03-12T22:38:12.000Z | 2022-02-19T03:58:35.000Z | examples/ds18b20/main.py | MAH1987/microhomie | e56fda36837e1745402e4c0ffb1642e01dfd3dea | [
"MIT"
] | 32 | 2018-02-25T06:48:59.000Z | 2021-07-13T23:38:59.000Z | examples/ds18b20/main.py | MAH1987/microhomie | e56fda36837e1745402e4c0ffb1642e01dfd3dea | [
"MIT"
] | 17 | 2018-03-29T07:40:09.000Z | 2021-05-30T09:47:59.000Z | import time
import uasyncio ad asyncio
from ds18x20 import DS18X20
from homie.node import HomieNode
from homie.device import HomieDevice, await_ready_state
from homie.property import HomieProperty
from homie.constants import FLOAT
from machine import Pin
from onewire import OneWire
if __name__ == "__main__":
main()
| 27.304878 | 78 | 0.609201 | import time
import uasyncio ad asyncio
from ds18x20 import DS18X20
from homie.node import HomieNode
from homie.device import HomieDevice, await_ready_state
from homie.property import HomieProperty
from homie.constants import FLOAT
from machine import Pin
from onewire import OneWire
class DS18B20(HomieNode):
def __init__(self, name="One Wire DS18B20", pin=12, interval=10, pull=-1):
super().__init__(id="ds18b20", name=name, type="ds18b20")
self.ds18b20 = DS18X20(OneWire(Pin(pin)))
addrs = self.ds18b20.scan()
if not addrs:
raise Exception("no DS18B20 found at bus on pin %d" % pin)
# save what should be the only address found
self.addr = addrs.pop()
self.interval = interval
self.p_temp = HomieProperty(
id="temperature",
name="Temperature",
datatype=FLOAT,
format="-40:80",
unit="°F",
default=0,
)
self.add_property(self.p_temp)
asyncio.create_task(self.update_data())
@await_ready_state
async def update_data(self):
delay = self.interval * 1000
while True:
self.p_temp.value = await self.read_temp()
await asyncio.sleep_ms(delay)
async def read_temp(self, fahrenheit=True):
"""
Reads temperature from a single DS18X20
:param fahrenheit: Whether or not to return value in Fahrenheit
:type fahrenheit: bool
:return: Temperature
:rtype: float
"""
self.ds18b20.convert_temp()
await asyncio.sleep_ms(750)
temp = self.ds18b20.read_temp(self.addr)
if fahrenheit:
ntemp = temp
self.device.dprint("Temp: " + str(self.c_to_f(ntemp)))
return self.c_to_f(ntemp)
return temp
@staticmethod
def c_to_f(c):
"""
Converts Celsius to Fahrenheit
:param c: Temperature in Celsius
:type c: float
:return: Temperature in Fahrenheit
:rtype: float
"""
return (c * 1.8) + 32
def main():
homie = HomieDevice(settings)
homie.add_node(DS18B20())
homie.run_forever()
if __name__ == "__main__":
main()
| 958 | 909 | 46 |
e24a38cc24327da8cdaa402c02eb7def92013bc2 | 223 | py | Python | appendix/pytorch/nsoltSubbandDeserialization2dLayer.py | nixir/SaivDr | f54bf29ef107916b26f42c0fbd6a23f2ef73b2b7 | [
"BSD-2-Clause"
] | 7 | 2018-11-26T08:49:24.000Z | 2021-09-15T08:46:35.000Z | appendix/pytorch/nsoltSubbandDeserialization2dLayer.py | nixir/SaivDr | f54bf29ef107916b26f42c0fbd6a23f2ef73b2b7 | [
"BSD-2-Clause"
] | 11 | 2019-12-02T11:20:18.000Z | 2022-02-14T05:17:11.000Z | appendix/pytorch/nsoltSubbandDeserialization2dLayer.py | nixir/SaivDr | f54bf29ef107916b26f42c0fbd6a23f2ef73b2b7 | [
"BSD-2-Clause"
] | 7 | 2019-06-05T07:45:20.000Z | 2021-09-15T08:46:36.000Z | import torch
import torch.nn as nn | 22.3 | 66 | 0.730942 | import torch
import torch.nn as nn
class NsoltSubbandDeserialization2dLayer(nn.Module):
def __init__(self):
super(NsoltSubbandDeserialization2dLayer, self).__init__()
def forward(self,x):
return x | 81 | 31 | 77 |
40ff63204516e6d672e6fa258e813ae77bcde1f4 | 768 | py | Python | teams/manual_team.py | lilium513/pycon2019-pongpy | efb084541c92c0ff53a9e13f0d950086c56168c8 | [
"MIT"
] | 11 | 2019-09-16T02:08:05.000Z | 2019-09-19T10:18:18.000Z | teams/manual_team.py | lilium513/pycon2019-pongpy | efb084541c92c0ff53a9e13f0d950086c56168c8 | [
"MIT"
] | null | null | null | teams/manual_team.py | lilium513/pycon2019-pongpy | efb084541c92c0ff53a9e13f0d950086c56168c8 | [
"MIT"
] | 10 | 2019-09-16T02:50:07.000Z | 2019-09-17T05:41:34.000Z | import pyxel
from pongpy.interfaces.team import Team
from pongpy.models.game_info import GameInfo
from pongpy.models.state import State
class ManualTeam(Team):
"""
デバッグ用の手動操作チーム。
Pyxel を直接読んでいるのでデバッグ用と以外では利用しない。
"""
@property
| 25.6 | 62 | 0.643229 | import pyxel
from pongpy.interfaces.team import Team
from pongpy.models.game_info import GameInfo
from pongpy.models.state import State
class ManualTeam(Team):
"""
デバッグ用の手動操作チーム。
Pyxel を直接読んでいるのでデバッグ用と以外では利用しない。
"""
@property
def name(self) -> str:
return 'Manual'
def atk_action(self, info: GameInfo, state: State) -> int:
if pyxel.btn(pyxel.KEY_I):
return -info.atk_return_limit
if pyxel.btn(pyxel.KEY_K):
return info.atk_return_limit
return 0
def def_action(self, info: GameInfo, state: State) -> int:
if pyxel.btn(pyxel.KEY_W):
return -info.def_return_limit
if pyxel.btn(pyxel.KEY_S):
return info.def_return_limit
return 0
| 439 | 0 | 80 |
1a2fef043bb60b57d450dba7201945e2b51ef1aa | 2,325 | py | Python | sparse_ct/example/n2self_train.py | mozanunal/SparseCT | 97d7f06c0414f934c7fa36023adcf9fe4c071eaf | [
"MIT"
] | 11 | 2020-11-01T11:35:30.000Z | 2022-03-30T02:19:52.000Z | sparse_ct/example/n2self_train.py | mozanunal/SparseCT | 97d7f06c0414f934c7fa36023adcf9fe4c071eaf | [
"MIT"
] | 8 | 2020-12-13T12:17:38.000Z | 2021-12-21T21:04:27.000Z | sparse_ct/example/n2self_train.py | mozanunal/SparseCT | 97d7f06c0414f934c7fa36023adcf9fe4c071eaf | [
"MIT"
] | null | null | null |
import random
from tqdm import tqdm
import glob
import numpy as np
import torch
from sparse_ct.reconstructor_2d.n2self import (
N2SelfReconstructor)
from sparse_ct.reconstructor_2d.dataset import (
DeepLesionDataset, EllipsesDataset)
if __name__ == "__main__":
params= {'batch_size': 8,
'shuffle': True,
'num_workers': 8}
N_PROJ = 64
pwd_train = '/external/CT_30_000/train'
pwd_test = '/external/CT_30_000/test'
file_list_train = glob.glob(pwd_train+'/*/*/*/*.png')
file_list_test = glob.glob(pwd_test+'/*/*/*/*.png')
print("file_list_train", len(file_list_train))
print("file_list_test", len(file_list_test))
# train_loader = torch.utils.data.DataLoader(
# DeepLesionDataset(
# file_list_train,
# return_gt=False,
# n_proj=N_PROJ,
# img_size=512),
# **params
# )
# test_loader = torch.utils.data.DataLoader(
# DeepLesionDataset(
# random.choices(file_list_test, k=1000),
# return_gt=True,
# n_proj=N_PROJ,
# img_size=512),
# **params
# )
train_loader = torch.utils.data.DataLoader(
EllipsesDataset(
ellipses_type='train',
return_gt=False,
n_proj=N_PROJ,
img_size=512),
**params
)
test_loader = torch.utils.data.DataLoader(
EllipsesDataset(
ellipses_type='validation',
return_gt=True,
n_proj=N_PROJ,
img_size=512),
**params
)
theta = np.linspace(0.0, 180.0, N_PROJ, endpoint=False)
recon_n2self = N2SelfReconstructor(
'N2SelfTrained',
net='unet', lr=0.0001,
n2self_weights=None,#'selfsuper-ellipses-64-l1-train1/iter_180000.pth',#'iter_15000.pth',
#'selfsuper-ellipses-64-train8/iter_58800.pth', #'self-super-train9/iter_199800.pth',
learnable_filter=False
)
recon_n2self.init_train(theta)
recon_n2self._eval(test_loader)
for i in range(50):
print('--------------- ',i)
recon_n2self._train_one_epoch(train_loader, test_loader)
recon_n2self._eval(test_loader)
recon_n2self._save('epoch_{}.pth'.format(i))
recon_n2self._save('end.pth')
| 28.703704 | 97 | 0.60172 |
import random
from tqdm import tqdm
import glob
import numpy as np
import torch
from sparse_ct.reconstructor_2d.n2self import (
N2SelfReconstructor)
from sparse_ct.reconstructor_2d.dataset import (
DeepLesionDataset, EllipsesDataset)
if __name__ == "__main__":
params= {'batch_size': 8,
'shuffle': True,
'num_workers': 8}
N_PROJ = 64
pwd_train = '/external/CT_30_000/train'
pwd_test = '/external/CT_30_000/test'
file_list_train = glob.glob(pwd_train+'/*/*/*/*.png')
file_list_test = glob.glob(pwd_test+'/*/*/*/*.png')
print("file_list_train", len(file_list_train))
print("file_list_test", len(file_list_test))
# train_loader = torch.utils.data.DataLoader(
# DeepLesionDataset(
# file_list_train,
# return_gt=False,
# n_proj=N_PROJ,
# img_size=512),
# **params
# )
# test_loader = torch.utils.data.DataLoader(
# DeepLesionDataset(
# random.choices(file_list_test, k=1000),
# return_gt=True,
# n_proj=N_PROJ,
# img_size=512),
# **params
# )
train_loader = torch.utils.data.DataLoader(
EllipsesDataset(
ellipses_type='train',
return_gt=False,
n_proj=N_PROJ,
img_size=512),
**params
)
test_loader = torch.utils.data.DataLoader(
EllipsesDataset(
ellipses_type='validation',
return_gt=True,
n_proj=N_PROJ,
img_size=512),
**params
)
theta = np.linspace(0.0, 180.0, N_PROJ, endpoint=False)
recon_n2self = N2SelfReconstructor(
'N2SelfTrained',
net='unet', lr=0.0001,
n2self_weights=None,#'selfsuper-ellipses-64-l1-train1/iter_180000.pth',#'iter_15000.pth',
#'selfsuper-ellipses-64-train8/iter_58800.pth', #'self-super-train9/iter_199800.pth',
learnable_filter=False
)
recon_n2self.init_train(theta)
recon_n2self._eval(test_loader)
for i in range(50):
print('--------------- ',i)
recon_n2self._train_one_epoch(train_loader, test_loader)
recon_n2self._eval(test_loader)
recon_n2self._save('epoch_{}.pth'.format(i))
recon_n2self._save('end.pth')
| 0 | 0 | 0 |
c59bef9a27c14a841661089c4012f24a6903ac37 | 11,604 | py | Python | pbil.py | duhines/NIC-MAXSAT | faab78fa72740e7dddc1444d5c3d8014f6b1ffe7 | [
"MIT"
] | 1 | 2018-10-18T16:39:03.000Z | 2018-10-18T16:39:03.000Z | pbil.py | duhines/NIC-MAXSAT | faab78fa72740e7dddc1444d5c3d8014f6b1ffe7 | [
"MIT"
] | null | null | null | pbil.py | duhines/NIC-MAXSAT | faab78fa72740e7dddc1444d5c3d8014f6b1ffe7 | [
"MIT"
] | null | null | null | """
Team: d3.js
Authors: David Anderson, Duncan Gans, Dustin Hines
Course: Nature-Inspired Computation
Assignment: Evolutionary Algorithms for MAXSAT: A Comparison of Genetic Algorithms
and Population Based Incremental Learning
Date: 1 October 2018
Description:
This file implements a PBIL approach to finding optima in MAXSAT problems.
It is imported and executed by code in genetic_alg.py; this code just
contains stuff directly relevant to PBIL.
Specifically, it includes three classes:
- PBILParameters
- PopulationVector
- BestSoFar
and N functions:
- score_pop
- fitness
- prettify
- print_PBIL_solution
- test_pbil
- pbil
The algorithm first generates a "population vector" containing likelihood
values for each literal. This vector is used for generating individuals
in test populations; for example, if the likelihood for literal 1 is 0.7,
the algorithm should generate individuals where lit1 is true 70% of the
time. Initially the population vector is entirely 0.5 values - a "flat"
distribution. Each time the algorithm iterates, it generates a population
of individuals using the population vector and then selects the best
N individuals from the population. These individuals are used to update
the Population vector with a CSL algorithm. The population is then
erased, the population vector is mutated, and a new population is
generated. The program keeps track of the best individual found and returns
it at the end as the optimum solution found. A full description of the
program and philosophy behind it can be found in our report.
"""
# Required libraries:
import random
# For testing: import time
import parse_input as parse
# Path is hardcoded to the problems folder. This folder
# should be located adjacent to genetic_alg.py
FILE = "problems/"
MAXSAT_PROBLEM = []
class PBILParameters:
"""
Class used to store PBIL cmd line args conveniently.
"""
# A class to look after our population vector:
class PopulationVector:
'''
'''
'''
A method that allows you to generate a population from a given popVector.
Inputs: pop_size, an int representation of how large a population to
generate
Returns: a population in the form of an array of arrays, where each sub-
array is an array of bools corresponding to literal values.
'''
'''
A method to update the probability vector based on the N best individuals
from a population.
Inputs: scored_pop - a sorted array containing tuple elements which contain
individuals and their fitness scores
ind_to_incl - The N best individuals to consider when updating pop_vector
alpha - learning rate for CSL algorithm
Returns: none, this operates in place
'''
'''Mutate pop vector in place.
inputs: Mu := P(mutation). Shift := degree of mutation
returns: none
'''
class BestSoFar:
"""
Purpose: keep track of the best solution so far and to provide a method to
compare the best so far to an individual.
"""
def compare_to_best(self, individual, ind_fit, iteration):
"""
Purpose: Update the best solution so far if the given individual
has a better solution.
Input: individual to check against best so far, iteration this individual
is from, fitness of this individual
Return: Boolean indicating whether the given individual was better than the
best solution so far.
"""
if ind_fit > self.fitness:
self.individual = individual.copy()
self.fitness = ind_fit
self.iteration_found = iteration
print("Found new best with score {} in generation {}".format(
self.fitness, self.iteration_found))
return True
return False
def score_pop(population, problem):
'''
Score the individuals in a population and sort them in descending
order by fitness
:param population: an array of arrays, where subarrays contain boolean
representations of individuals.
:param problem: A dictionary representation of a given MAXSAT problem
as returned by parse_input.py
:return: array of tuples. Each tuple contains two elements; the first is
an array containing boolean values for each literal, and corresponds to
an individual. The second value is an int representation of that
individuals fitness; higher is better.
'''
scored_generation = []
for individual in population:
score = fitness(individual, problem)
scored_generation.append((individual, score))
# From https://stackoverflow.com/questions/3121979/:
return sorted(scored_generation, key=lambda tup: tup[1], reverse=True)
def fitness(individual, problem):
"""
Score the fitness of an indivdual based on a MAXSAT problem.
:param individual: An "individual" represented as an array
:param problem: MAXSAT problem to compute fitness in ref to, usually
stored as global MAXSAT_PROBLEM
:return: An int representation of individuals fitness - higher is better
"""
fit_score = 0
for clause in problem["clauses"]:
check = False
for literal in clause:
if literal > 0:
check = check or individual[literal - 1]
else:
check = check or not individual[abs(literal) - 1]
if check:
fit_score += 1
return fit_score
def prettify(individual):
"""
Formats an array representation of an individual s.t. it can be printed
easily.
:param individual: an array representation of an individual
:return: a string representation of that same individual
"""
pretty = ""
ith_literal = 1
ten_per_line = 0
for literal in individual:
pretty = pretty + "L" + str(ith_literal) + ": " + str(literal) + " "
ith_literal = ith_literal + 1
ten_per_line = ten_per_line + 1
if ten_per_line > 10:
ten_per_line = 0
pretty = pretty + "\n"
return pretty
def print_PBIL_solution(curr_best, parameters, problem):
"""
Purpose: Print output in our nice lil standardized way; see writeup
:param curr_best: List representing the best solution
:param parameters: Problem parameters we got earlier
:return: None, this is a printing function.
"""
print("File: {}".format(parameters.file_name))
num_literals = problem["num_literals"]
num_clauses = problem["num_clauses"]
print("Literals count: {}\nClauses count: {}".format(num_literals, num_clauses))
fitness_div_clauses = curr_best.fitness / problem["num_clauses"]
percentage_correct = round(fitness_div_clauses * 100, 1)
print("Best individual scored {} ({}%)".format(curr_best.fitness,
percentage_correct))
print("Difference: {}".format(problem["num_clauses"] -
curr_best.fitness))
print("Solution:\n{}".format(prettify(curr_best.individual)))
print("Found in iteration {}".format(curr_best.iteration_found))
'''
def test_pbil(file_name, pop_size, num_incl, alpha, shift, mutation_prob, num_generations, algorithm):
"""
Used to test in conjuntion with the test module. Not important in final code
"""
global MAXSAT_PROBLEM
MAXSAT_PROBLEM = parse.return_problem("testy/" + file_name)
parameters = PBILParameters(file_name, pop_size, num_incl, alpha, shift, mutation_prob, num_generations, algorithm)
start = time.time()
solution = pbil(MAXSAT_PROBLEM, parameters)
finished = time.time()
run_time = finished - start
#time taken, how good the solution is, generation best solution
return (solution, run_time)
'''
def pbil(problem, parameters):
"""
Purpose: This is a function implementing PBIL optimization of MAXSAT
problems
:param problem: the MAXSAT problem to optimize, as parsed in parse_input.py
:param parameters: Problem parameters. Acquired in main of genetic_alg.py
:return: Returns the best individual found
"""
pop_vector = PopulationVector(problem["num_literals"])
curr_best = BestSoFar([], 0)
# The following is the actual PBIL algorithm:
iteration = 0
while iteration < parameters.num_generations:
print("Generation: {}".format(iteration))
nth_pop = pop_vector.generate_population(parameters.pop_size)
nth_pop = score_pop(nth_pop, problem)
# Initialize curr_best:
if iteration == 0:
curr_best = BestSoFar(nth_pop, iteration)
# Pull out the best individual and update best_so_far if it's better
curr_best.compare_to_best(nth_pop[0][0], nth_pop[0][1], iteration)
# Update pop vector using CSL approach described in paper:
pop_vector.update_vector(nth_pop,
parameters.ind_to_incl,
parameters.alpha)
pop_vector.mutate_vector(parameters.mutation_prob,
parameters.mu_shift)
iteration += 1
# Final population vector might approximate correct solution.
# So, we round it out and see if it's better than individuals we've already
# checked.
final_pop = [round(x) for x in pop_vector.vector]
curr_best.compare_to_best(final_pop, fitness(final_pop, problem),
parameters.num_generations)
# Print and return the best individual found:
print_PBIL_solution(curr_best, parameters, problem)
return curr_best
| 38.68 | 119 | 0.662789 | """
Team: d3.js
Authors: David Anderson, Duncan Gans, Dustin Hines
Course: Nature-Inspired Computation
Assignment: Evolutionary Algorithms for MAXSAT: A Comparison of Genetic Algorithms
and Population Based Incremental Learning
Date: 1 October 2018
Description:
This file implements a PBIL approach to finding optima in MAXSAT problems.
It is imported and executed by code in genetic_alg.py; this code just
contains stuff directly relevant to PBIL.
Specifically, it includes three classes:
- PBILParameters
- PopulationVector
- BestSoFar
and N functions:
- score_pop
- fitness
- prettify
- print_PBIL_solution
- test_pbil
- pbil
The algorithm first generates a "population vector" containing likelihood
values for each literal. This vector is used for generating individuals
in test populations; for example, if the likelihood for literal 1 is 0.7,
the algorithm should generate individuals where lit1 is true 70% of the
time. Initially the population vector is entirely 0.5 values - a "flat"
distribution. Each time the algorithm iterates, it generates a population
of individuals using the population vector and then selects the best
N individuals from the population. These individuals are used to update
the Population vector with a CSL algorithm. The population is then
erased, the population vector is mutated, and a new population is
generated. The program keeps track of the best individual found and returns
it at the end as the optimum solution found. A full description of the
program and philosophy behind it can be found in our report.
"""
# Required libraries:
import random
# For testing: import time
import parse_input as parse
# Path is hardcoded to the problems folder. This folder
# should be located adjacent to genetic_alg.py
FILE = "problems/"
MAXSAT_PROBLEM = []
class PBILParameters:
"""
Class used to store PBIL cmd line args conveniently.
"""
def __init__(self, file_name, pop_size,
ind_to_include, alpha,
mutation_shift, mutation_prob,
num_generations, algorithm):
self.file_name = file_name
self.pop_size = int(pop_size)
self.ind_to_incl = int(ind_to_include)
self.alpha = float(alpha)
self.mu_shift = float(mutation_shift)
self.mutation_prob = float(mutation_prob)
self.num_generations = int(num_generations)
self.algorithm = algorithm
# A class to look after our population vector:
class PopulationVector:
'''
'''
def __init__(self, num_lits):
# P = 0.5 for every literal to start with
self.num_lits = num_lits
self.vector = [0.5] * int(num_lits)
'''
A method that allows you to generate a population from a given popVector.
Inputs: pop_size, an int representation of how large a population to
generate
Returns: a population in the form of an array of arrays, where each sub-
array is an array of bools corresponding to literal values.
'''
def generate_population(self, pop_size):
population = []
individual = []
for i in range(pop_size):
for j in range(self.num_lits):
individual.append(random.random() < self.vector[j])
population.append(individual.copy())
individual = []
return population
'''
A method to update the probability vector based on the N best individuals
from a population.
Inputs: scored_pop - a sorted array containing tuple elements which contain
individuals and their fitness scores
ind_to_incl - The N best individuals to consider when updating pop_vector
alpha - learning rate for CSL algorithm
Returns: none, this operates in place
'''
def update_vector(self, scored_pop, ind_to_incl, alpha):
for individual in scored_pop[0:ind_to_incl]:
for i in range(self.num_lits):
if individual[0][i]:
self.vector[i] = self.vector[i]*(1 - alpha) + alpha
else:
self.vector[i] = self.vector[i]*(1 - alpha)
'''Mutate pop vector in place.
inputs: Mu := P(mutation). Shift := degree of mutation
returns: none
'''
def mutate_vector(self, mu, shift):
for i in range(len(self.vector)):
if random.random() < mu:
if random.random() < 0.5:
self.vector[i] += shift
if self.vector[i] > 1.0:
self.vector[i] = 1.0
elif random.random() >= 0.5 and not self.vector[i] <= 0.0:
self.vector[i] -= shift
if self.vector[i] < 0.0:
self.vector[i] = 0.0
class BestSoFar:
"""
Purpose: keep track of the best solution so far and to provide a method to
compare the best so far to an individual.
"""
def __init__(self, individual, iteration):
self.individual = individual
self.iteration_found = iteration
self.fitness = 0
def compare_to_best(self, individual, ind_fit, iteration):
"""
Purpose: Update the best solution so far if the given individual
has a better solution.
Input: individual to check against best so far, iteration this individual
is from, fitness of this individual
Return: Boolean indicating whether the given individual was better than the
best solution so far.
"""
if ind_fit > self.fitness:
self.individual = individual.copy()
self.fitness = ind_fit
self.iteration_found = iteration
print("Found new best with score {} in generation {}".format(
self.fitness, self.iteration_found))
return True
return False
def score_pop(population, problem):
'''
Score the individuals in a population and sort them in descending
order by fitness
:param population: an array of arrays, where subarrays contain boolean
representations of individuals.
:param problem: A dictionary representation of a given MAXSAT problem
as returned by parse_input.py
:return: array of tuples. Each tuple contains two elements; the first is
an array containing boolean values for each literal, and corresponds to
an individual. The second value is an int representation of that
individuals fitness; higher is better.
'''
scored_generation = []
for individual in population:
score = fitness(individual, problem)
scored_generation.append((individual, score))
# From https://stackoverflow.com/questions/3121979/:
return sorted(scored_generation, key=lambda tup: tup[1], reverse=True)
def fitness(individual, problem):
"""
Score the fitness of an indivdual based on a MAXSAT problem.
:param individual: An "individual" represented as an array
:param problem: MAXSAT problem to compute fitness in ref to, usually
stored as global MAXSAT_PROBLEM
:return: An int representation of individuals fitness - higher is better
"""
fit_score = 0
for clause in problem["clauses"]:
check = False
for literal in clause:
if literal > 0:
check = check or individual[literal - 1]
else:
check = check or not individual[abs(literal) - 1]
if check:
fit_score += 1
return fit_score
def prettify(individual):
    """
    Format an array representation of an individual for easy printing.

    Each literal is rendered as "L<n>: <value> "; a newline is inserted
    after every ten literals.

    :param individual: an array representation of an individual
    :return: a string representation of that same individual
    """
    pretty = ""
    count_on_line = 0
    for position, literal in enumerate(individual, start=1):
        pretty += "L" + str(position) + ": " + str(literal) + " "
        count_on_line += 1
        # BUG FIX: the original checked `> 10` AFTER incrementing, which put
        # eleven literals on each line despite the counter being named
        # `ten_per_line`; wrap after exactly ten.
        if count_on_line == 10:
            count_on_line = 0
            pretty += "\n"
    return pretty
def print_PBIL_solution(curr_best, parameters, problem):
    """
    Print the PBIL result in the standardized layout (see writeup).

    :param curr_best: best-solution object exposing `fitness`, `individual`
        and `iteration_found`
    :param parameters: problem parameters gathered earlier (uses `file_name`)
    :param problem: MAXSAT problem dict ("num_literals", "num_clauses")
    :return: None; everything goes to stdout.
    """
    clause_total = problem["num_clauses"]
    print("File: {}".format(parameters.file_name))
    print("Literals count: {}\nClauses count: {}".format(
        problem["num_literals"], clause_total))
    # Report fitness both absolutely and as a percentage of all clauses.
    percentage = round(curr_best.fitness / clause_total * 100, 1)
    print("Best individual scored {} ({}%)".format(curr_best.fitness, percentage))
    print("Difference: {}".format(clause_total - curr_best.fitness))
    print("Solution:\n{}".format(prettify(curr_best.individual)))
    print("Found in iteration {}".format(curr_best.iteration_found))
'''
def test_pbil(file_name, pop_size, num_incl, alpha, shift, mutation_prob, num_generations, algorithm):
"""
Used to test in conjuntion with the test module. Not important in final code
"""
global MAXSAT_PROBLEM
MAXSAT_PROBLEM = parse.return_problem("testy/" + file_name)
parameters = PBILParameters(file_name, pop_size, num_incl, alpha, shift, mutation_prob, num_generations, algorithm)
start = time.time()
solution = pbil(MAXSAT_PROBLEM, parameters)
finished = time.time()
run_time = finished - start
#time taken, how good the solution is, generation best solution
return (solution, run_time)
'''
def pbil(problem, parameters):
    """
    Purpose: This is a function implementing PBIL optimization of MAXSAT
    problems
    :param problem: the MAXSAT problem to optimize, as parsed in parse_input.py
    :param parameters: Problem parameters. Acquired in main of genetic_alg.py
    :return: Returns the best individual found
    """
    # Probability vector, one entry per literal, driving individual sampling.
    pop_vector = PopulationVector(problem["num_literals"])
    curr_best = BestSoFar([], 0)
    # The following is the actual PBIL algorithm:
    iteration = 0
    while iteration < parameters.num_generations:
        print("Generation: {}".format(iteration))
        nth_pop = pop_vector.generate_population(parameters.pop_size)
        # After score_pop, nth_pop is a list of (individual, fitness)
        # tuples sorted best-first.
        nth_pop = score_pop(nth_pop, problem)
        # Initialize curr_best:
        # NOTE(review): this passes the whole scored population to BestSoFar,
        # while the initializer above received a single individual ([]).
        # Presumably BestSoFar accepts both forms — confirm against its
        # constructor.
        if iteration == 0:
            curr_best = BestSoFar(nth_pop, iteration)
        # Pull out the best individual and update best_so_far if it's better
        curr_best.compare_to_best(nth_pop[0][0], nth_pop[0][1], iteration)
        # Update pop vector using CSL approach described in paper:
        pop_vector.update_vector(nth_pop,
                                 parameters.ind_to_incl,
                                 parameters.alpha)
        pop_vector.mutate_vector(parameters.mutation_prob,
                                 parameters.mu_shift)
        iteration += 1
    # Final population vector might approximate correct solution.
    # So, we round it out and see if it's better than individuals we've already
    # checked.
    final_pop = [round(x) for x in pop_vector.vector]
    curr_best.compare_to_best(final_pop, fitness(final_pop, problem),
                              parameters.num_generations)
    # Print and return the best individual found:
    print_PBIL_solution(curr_best, parameters, problem)
    return curr_best
| 1,867 | 0 | 156 |
26bdb9657a8e27d95462daba715cf844f0ab44b8 | 706 | py | Python | interfaces/solution_laurens/shippingstrategy.py | infelane/python-for-java-devs | 56f313f89ad8603598f879f31e0d9a35795e50e3 | [
"Apache-2.0"
] | 1 | 2019-10-20T16:05:30.000Z | 2019-10-20T16:05:30.000Z | interfaces/solution_laurens/shippingstrategy.py | infelane/python-for-java-devs | 56f313f89ad8603598f879f31e0d9a35795e50e3 | [
"Apache-2.0"
] | 1 | 2020-07-10T09:09:58.000Z | 2020-07-10T09:09:58.000Z | interfaces/solution_laurens/shippingstrategy.py | infelane/python-for-java-devs | 56f313f89ad8603598f879f31e0d9a35795e50e3 | [
"Apache-2.0"
] | 3 | 2020-07-10T07:46:51.000Z | 2022-02-21T08:58:45.000Z | from abc import ABC, abstractmethod
| 18.578947 | 53 | 0.671388 | from abc import ABC, abstractmethod
class Order(object):
    """A customer order; currently an empty placeholder with no data."""
class ShippingStrategy(ABC):
    """Abstract interface for shipping-cost strategies (Strategy pattern)."""

    @abstractmethod
    def cost(self, order: Order) -> float:
        """Return the shipping cost for *order*."""
class FedExStrategy(ShippingStrategy):
    """FedEx shipping: a flat rate of 3.0 regardless of the order."""

    def cost(self, order):
        """Return the flat FedEx shipping rate."""
        flat_rate = 3.0
        return flat_rate
class UPSStrategy(ShippingStrategy):
    """UPS shipping: a flat rate of 4.0 regardless of the order."""

    def cost(self, order):
        """Return the flat UPS shipping rate."""
        flat_rate = 4.0
        return flat_rate
class PostalStrategy(ShippingStrategy):
    """Postal shipping: a flat rate of 5.0 regardless of the order."""

    def cost(self, order):
        """Return the flat postal shipping rate."""
        flat_rate = 5.0
        return flat_rate
class ShippingCost(object):
    """Computes the shipping cost of an order via a pluggable strategy."""

    def __init__(self, strategy: ShippingStrategy):
        """Store the strategy, rejecting anything that is not one.

        Raises TypeError rather than using ``assert``: assertions are
        stripped under ``python -O`` and must not guard inputs.  (The
        original raised AssertionError here.)
        """
        if not isinstance(strategy, ShippingStrategy):
            raise TypeError(
                f"strategy must be a ShippingStrategy, got {type(strategy).__name__}"
            )
        self.strategy = strategy

    def calculate(self, order: Order):
        """Return the cost of shipping *order* using the configured strategy."""
        cost = self.strategy.cost(order)
        return cost
| 278 | 117 | 269 |
14addc646899a196e5503353c560289ee548273f | 8,972 | py | Python | dark/diamond/hsp.py | UdoGi/dark-matter | 3d49e89fa5e81f83144119f6216c5774176d203b | [
"MIT"
] | 10 | 2016-03-09T09:43:14.000Z | 2021-04-03T21:46:12.000Z | dark/diamond/hsp.py | terrycojones/dark-matter | 67d16f870db6b4239e17e542bc6e3f072dc29c75 | [
"MIT"
] | 332 | 2015-01-07T12:37:30.000Z | 2022-01-20T15:48:11.000Z | dark/diamond/hsp.py | terrycojones/dark-matter | 67d16f870db6b4239e17e542bc6e3f072dc29c75 | [
"MIT"
] | 4 | 2016-03-08T14:56:39.000Z | 2021-01-27T08:11:27.000Z | from __future__ import division, print_function
import sys
from dark.btop import countGaps
def _debugPrint(hsp, queryLen, localDict, msg=''):
    """
    Dump normalizeHSP's inputs and local state to stderr, then fail.

    @param hsp: The HSP C{dict} passed to normalizeHSP.
    @param queryLen: the length of the query sequence.
    @param localDict: A C{dict} of local variables (as produced by locals()).
    @param msg: A C{str} message to raise C{AssertionError} with.
    @raise AssertionError: unconditionally.
    """
    err = sys.stderr
    print('normalizeHSP error:', file=err)
    print('  queryLen: %d' % queryLen, file=err)
    print('  Original HSP:', file=err)
    attrs = ('bits', 'btop', 'expect', 'frame', 'query_end', 'query_start',
             'sbjct', 'query', 'sbjct_end', 'sbjct_start')
    for attr in attrs:
        print('    %s: %r' % (attr, hsp[attr]), file=err)
    print('  Local variables:', file=err)
    # The hsp dict was already printed above, so skip it here.
    for name in sorted(localDict):
        if name != 'hsp':
            print('    %s: %s' % (name, localDict[name]), file=err)
    raise AssertionError(msg)
def _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd,
                 queryStartInSubject, queryEndInSubject, hsp, queryLen,
                 subjectGaps, queryGaps, localDict):
    """
    Perform some sanity checks on an HSP. Call _debugPrint on any error.
    @param subjectStart: The 0-based C{int} start offset of the match in the
        subject.
    @param subjectEnd: The 0-based C{int} end offset of the match in the
        subject.
    @param queryStart: The 0-based C{int} start offset of the match in the
        query.
    @param queryEnd: The 0-based C{int} end offset of the match in the query.
    @param queryStartInSubject: The 0-based C{int} offset of where the query
        starts in the subject.
    @param queryEndInSubject: The 0-based C{int} offset of where the query
        ends in the subject.
    @param hsp: The HSP C{dict} passed to normalizeHSP.
    @param queryLen: the C{int} length of the query sequence.
    @param subjectGaps: the C{int} number of gaps in the subject.
    @param queryGaps: the C{int} number of gaps in the query.
    @param localDict: A C{dict} of local variables from our caller (as
        produced by locals()).
    """
    # Subject indices must always be ascending.
    if subjectStart >= subjectEnd:
        _debugPrint(hsp, queryLen, localDict, 'subjectStart >= subjectEnd')
    subjectMatchLength = subjectEnd - subjectStart
    queryMatchLength = queryEnd - queryStart
    # Sanity check that the length of the matches in the subject and query
    # are identical, taking into account gaps in both.
    subjectMatchLengthWithGaps = subjectMatchLength + subjectGaps
    queryMatchLengthWithGaps = queryMatchLength + queryGaps
    if subjectMatchLengthWithGaps != queryMatchLengthWithGaps:
        _debugPrint(hsp, queryLen, localDict,
                    'Including gaps, subject match length (%d) != Query match '
                    'length (%d)' % (subjectMatchLengthWithGaps,
                                     queryMatchLengthWithGaps))
    # The query's projection onto the subject must fully cover the
    # subject's matched region on both sides.
    if queryStartInSubject > subjectStart:
        _debugPrint(hsp, queryLen, localDict,
                    'queryStartInSubject (%d) > subjectStart (%d)' %
                    (queryStartInSubject, subjectStart))
    if queryEndInSubject < subjectEnd:
        _debugPrint(hsp, queryLen, localDict,
                    'queryEndInSubject (%d) < subjectEnd (%d)' %
                    (queryEndInSubject, subjectEnd))
def normalizeHSP(hsp, queryLen, diamondTask):
    """
    Examine an HSP and return information about where the query and subject
    match begins and ends. Return a dict with keys that allow the query to
    be displayed against the subject. The returned readStartInSubject and
    readEndInSubject indices are offsets into the subject. I.e., they
    indicate where in the subject the query falls.
    In the returned object, all indices are suitable for Python string
    slicing etc. We must be careful to convert from the 1-based offsets
    found in DIAMOND output properly.
    hsp['frame'] is a value from {-3, -2, -1, 1, 2, 3}. The sign indicates
    negative or positive sense (i.e., the direction of reading through the
    query to get the alignment). The frame value is the nucleotide match offset
    modulo 3, plus one (i.e., it tells us which of the 3 possible query reading
    frames was used in the match).
    NOTE: the returned readStartInSubject value may be negative. We consider
    the subject sequence to start at offset 0. So if the query string has
    sufficient additional nucleotides before the start of the alignment
    match, it may protrude to the left of the subject. Similarly, the returned
    readEndInSubject can be greater than the subjectEnd.
    @param hsp: an HSP in the form of a C{dict}, built from a DIAMOND record.
        All passed offsets are 1-based.
    @param queryLen: the length of the query sequence.
    @param diamondTask: The C{str} command-line matching algorithm that was
        run (either 'blastx' or 'blastp').
    @return: A C{dict} with C{str} keys and C{int} offset values. Keys are
            readStart
            readEnd
            readStartInSubject
            readEndInSubject
            subjectStart
            subjectEnd
        The returned offset values are all zero-based.
    """
    # Count the gap positions on each side of the alignment from the
    # BTOP alignment string.
    queryGaps, subjectGaps = countGaps(hsp['btop'])
    # Make some variables using Python's standard string indexing (start
    # offset included, end offset not). No calculations in this function
    # are done with the original 1-based HSP variables.
    queryStart = hsp['query_start'] - 1
    queryEnd = hsp['query_end']
    subjectStart = hsp['sbjct_start'] - 1
    subjectEnd = hsp['sbjct_end']
    queryReversed = hsp['frame'] < 0
    # Query offsets must be ascending, unless we're looking at blastx output
    # and the query was reversed for the match.
    if queryStart >= queryEnd:
        if diamondTask == 'blastx' and queryReversed:
            # Compute new query start and end indices, based on their
            # distance from the end of the string.
            #
            # Above we took one off the start index, so we need to undo
            # that (because the start is actually the end). We didn't take
            # one off the end index, and need to do that now (because the
            # end is actually the start).
            queryStart = queryLen - (queryStart + 1)
            queryEnd = queryLen - (queryEnd - 1)
        else:
            _debugPrint(hsp, queryLen, locals(), 'queryStart >= queryEnd')
    if diamondTask == 'blastx':
        # In DIAMOND blastx output, subject offsets are based on protein
        # sequence length but queries (and the reported offsets) are
        # nucleotide. Convert the query offsets to protein because we will
        # plot against the subject (protein).
        #
        # Convert queryLen and the query nucleotide start and end offsets
        # to be valid for the query after translation to AAs. When
        # translating, DIAMOND may ignore some nucleotides at the start
        # and/or the end of the original DNA query. At the start this is
        # due to the frame in use, and at the end it is due to always using
        # three nucleotides at a time to form codons.
        #
        # So, for example, a query of 6 nucleotides that is translated in
        # frame 2 (i.e., the translation starts from the second nucleotide)
        # will have length 1 as an AA sequence. The first nucleotide is
        # ignored due to the frame and the last two due to there not being
        # enough final nucleotides to make another codon.
        #
        # In the following, the subtraction accounts for the first form of
        # loss and the integer division for the second.
        initiallyIgnored = abs(hsp['frame']) - 1
        queryLen = (queryLen - initiallyIgnored) // 3
        queryStart = (queryStart - initiallyIgnored) // 3
        queryEnd = (queryEnd - initiallyIgnored) // 3
    # unmatchedQueryLeft is the number of query bases that will extend
    # to the left of the start of the subject in our plots.
    unmatchedQueryLeft = queryStart
    # Set the query offsets into the subject.
    queryStartInSubject = subjectStart - unmatchedQueryLeft
    queryEndInSubject = queryStartInSubject + queryLen + queryGaps
    # Validate everything before returning; _sanityCheck raises
    # AssertionError (via _debugPrint) on any inconsistency.
    _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd,
                 queryStartInSubject, queryEndInSubject, hsp, queryLen,
                 subjectGaps, queryGaps, locals())
    return {
        'readStart': queryStart,
        'readEnd': queryEnd,
        'readStartInSubject': queryStartInSubject,
        'readEndInSubject': queryEndInSubject,
        'subjectStart': subjectStart,
        'subjectEnd': subjectEnd,
    }
| 45.313131 | 79 | 0.665961 | from __future__ import division, print_function
import sys
from dark.btop import countGaps
def _debugPrint(hsp, queryLen, localDict, msg=''):
    """
    Report normalizeHSP's inputs and local variables on stderr, then raise.

    @param hsp: The HSP C{dict} passed to normalizeHSP.
    @param queryLen: the length of the query sequence.
    @param localDict: A C{dict} of local variables (as produced by locals()).
    @param msg: A C{str} message to raise C{AssertionError} with.
    @raise AssertionError: unconditionally.
    """
    out = sys.stderr
    print('normalizeHSP error:', file=out)
    print('  queryLen: %d' % queryLen, file=out)
    print('  Original HSP:', file=out)
    for attr in ('bits', 'btop', 'expect', 'frame', 'query_end',
                 'query_start', 'sbjct', 'query', 'sbjct_end', 'sbjct_start'):
        print('    %s: %r' % (attr, hsp[attr]), file=out)
    print('  Local variables:', file=out)
    # Skip 'hsp' itself; its contents were just printed above.
    for key in sorted(localDict):
        if key != 'hsp':
            print('    %s: %s' % (key, localDict[key]), file=out)
    raise AssertionError(msg)
def _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd,
                 queryStartInSubject, queryEndInSubject, hsp, queryLen,
                 subjectGaps, queryGaps, localDict):
    """
    Perform some sanity checks on an HSP. Call _debugPrint on any error.
    @param subjectStart: The 0-based C{int} start offset of the match in the
        subject.
    @param subjectEnd: The 0-based C{int} end offset of the match in the
        subject.
    @param queryStart: The 0-based C{int} start offset of the match in the
        query.
    @param queryEnd: The 0-based C{int} end offset of the match in the query.
    @param queryStartInSubject: The 0-based C{int} offset of where the query
        starts in the subject.
    @param queryEndInSubject: The 0-based C{int} offset of where the query
        ends in the subject.
    @param hsp: The HSP C{dict} passed to normalizeHSP.
    @param queryLen: the C{int} length of the query sequence.
    @param subjectGaps: the C{int} number of gaps in the subject.
    @param queryGaps: the C{int} number of gaps in the query.
    @param localDict: A C{dict} of local variables from our caller (as
        produced by locals()).
    """
    # Subject indices must always be ascending.
    if subjectStart >= subjectEnd:
        _debugPrint(hsp, queryLen, localDict, 'subjectStart >= subjectEnd')
    subjectMatchLength = subjectEnd - subjectStart
    queryMatchLength = queryEnd - queryStart
    # Sanity check that the length of the matches in the subject and query
    # are identical, taking into account gaps in both.
    subjectMatchLengthWithGaps = subjectMatchLength + subjectGaps
    queryMatchLengthWithGaps = queryMatchLength + queryGaps
    if subjectMatchLengthWithGaps != queryMatchLengthWithGaps:
        _debugPrint(hsp, queryLen, localDict,
                    'Including gaps, subject match length (%d) != Query match '
                    'length (%d)' % (subjectMatchLengthWithGaps,
                                     queryMatchLengthWithGaps))
    # The query's projection onto the subject must fully cover the
    # subject's matched region on both sides.
    if queryStartInSubject > subjectStart:
        _debugPrint(hsp, queryLen, localDict,
                    'queryStartInSubject (%d) > subjectStart (%d)' %
                    (queryStartInSubject, subjectStart))
    if queryEndInSubject < subjectEnd:
        _debugPrint(hsp, queryLen, localDict,
                    'queryEndInSubject (%d) < subjectEnd (%d)' %
                    (queryEndInSubject, subjectEnd))
def normalizeHSP(hsp, queryLen, diamondTask):
    """
    Examine an HSP and return information about where the query and subject
    match begins and ends. Return a dict with keys that allow the query to
    be displayed against the subject. The returned readStartInSubject and
    readEndInSubject indices are offsets into the subject. I.e., they
    indicate where in the subject the query falls.
    In the returned object, all indices are suitable for Python string
    slicing etc. We must be careful to convert from the 1-based offsets
    found in DIAMOND output properly.
    hsp['frame'] is a value from {-3, -2, -1, 1, 2, 3}. The sign indicates
    negative or positive sense (i.e., the direction of reading through the
    query to get the alignment). The frame value is the nucleotide match offset
    modulo 3, plus one (i.e., it tells us which of the 3 possible query reading
    frames was used in the match).
    NOTE: the returned readStartInSubject value may be negative. We consider
    the subject sequence to start at offset 0. So if the query string has
    sufficient additional nucleotides before the start of the alignment
    match, it may protrude to the left of the subject. Similarly, the returned
    readEndInSubject can be greater than the subjectEnd.
    @param hsp: an HSP in the form of a C{dict}, built from a DIAMOND record.
        All passed offsets are 1-based.
    @param queryLen: the length of the query sequence.
    @param diamondTask: The C{str} command-line matching algorithm that was
        run (either 'blastx' or 'blastp').
    @return: A C{dict} with C{str} keys and C{int} offset values. Keys are
            readStart
            readEnd
            readStartInSubject
            readEndInSubject
            subjectStart
            subjectEnd
        The returned offset values are all zero-based.
    """
    # Count the gap positions on each side of the alignment from the
    # BTOP alignment string.
    queryGaps, subjectGaps = countGaps(hsp['btop'])
    # Make some variables using Python's standard string indexing (start
    # offset included, end offset not). No calculations in this function
    # are done with the original 1-based HSP variables.
    queryStart = hsp['query_start'] - 1
    queryEnd = hsp['query_end']
    subjectStart = hsp['sbjct_start'] - 1
    subjectEnd = hsp['sbjct_end']
    queryReversed = hsp['frame'] < 0
    # Query offsets must be ascending, unless we're looking at blastx output
    # and the query was reversed for the match.
    if queryStart >= queryEnd:
        if diamondTask == 'blastx' and queryReversed:
            # Compute new query start and end indices, based on their
            # distance from the end of the string.
            #
            # Above we took one off the start index, so we need to undo
            # that (because the start is actually the end). We didn't take
            # one off the end index, and need to do that now (because the
            # end is actually the start).
            queryStart = queryLen - (queryStart + 1)
            queryEnd = queryLen - (queryEnd - 1)
        else:
            _debugPrint(hsp, queryLen, locals(), 'queryStart >= queryEnd')
    if diamondTask == 'blastx':
        # In DIAMOND blastx output, subject offsets are based on protein
        # sequence length but queries (and the reported offsets) are
        # nucleotide. Convert the query offsets to protein because we will
        # plot against the subject (protein).
        #
        # Convert queryLen and the query nucleotide start and end offsets
        # to be valid for the query after translation to AAs. When
        # translating, DIAMOND may ignore some nucleotides at the start
        # and/or the end of the original DNA query. At the start this is
        # due to the frame in use, and at the end it is due to always using
        # three nucleotides at a time to form codons.
        #
        # So, for example, a query of 6 nucleotides that is translated in
        # frame 2 (i.e., the translation starts from the second nucleotide)
        # will have length 1 as an AA sequence. The first nucleotide is
        # ignored due to the frame and the last two due to there not being
        # enough final nucleotides to make another codon.
        #
        # In the following, the subtraction accounts for the first form of
        # loss and the integer division for the second.
        initiallyIgnored = abs(hsp['frame']) - 1
        queryLen = (queryLen - initiallyIgnored) // 3
        queryStart = (queryStart - initiallyIgnored) // 3
        queryEnd = (queryEnd - initiallyIgnored) // 3
    # unmatchedQueryLeft is the number of query bases that will extend
    # to the left of the start of the subject in our plots.
    unmatchedQueryLeft = queryStart
    # Set the query offsets into the subject.
    queryStartInSubject = subjectStart - unmatchedQueryLeft
    queryEndInSubject = queryStartInSubject + queryLen + queryGaps
    # Validate all derived offsets; _sanityCheck raises AssertionError
    # (via _debugPrint) on any inconsistency.
    _sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd,
                 queryStartInSubject, queryEndInSubject, hsp, queryLen,
                 subjectGaps, queryGaps, locals())
    return {
        'readStart': queryStart,
        'readEnd': queryEnd,
        'readStartInSubject': queryStartInSubject,
        'readEndInSubject': queryEndInSubject,
        'subjectStart': subjectStart,
        'subjectEnd': subjectEnd,
    }
| 0 | 0 | 0 |
801fc9f57fbc08f68d7eef8897a8e641de814a29 | 126,006 | py | Python | orcid_hub/utils.py | tenet-ac-za/NZ-ORCID-Hub | f1183fbb94509b102fa58d7812ed33d8f35c5d4d | [
"MIT"
] | 15 | 2017-02-06T01:41:57.000Z | 2021-07-22T08:53:40.000Z | orcid_hub/utils.py | tenet-ac-za/NZ-ORCID-Hub | f1183fbb94509b102fa58d7812ed33d8f35c5d4d | [
"MIT"
] | 82 | 2017-03-23T00:30:04.000Z | 2022-02-01T00:10:34.000Z | orcid_hub/utils.py | tenet-ac-za/NZ-ORCID-Hub | f1183fbb94509b102fa58d7812ed33d8f35c5d4d | [
"MIT"
] | 6 | 2017-03-23T07:26:05.000Z | 2021-02-23T11:20:21.000Z | # -*- coding: utf-8 -*-
"""Various utilities."""
import json
import logging
import os
import random
import string
import time
from collections import defaultdict
from datetime import date, datetime, timedelta
from itertools import filterfalse, groupby
from urllib.parse import quote, urlencode, urlparse
import emails
import flask
import requests
import yaml
from flask import request, url_for
from flask_login import current_user
from html2text import html2text
from jinja2 import Template
from peewee import JOIN
from yaml.dumper import Dumper
from yaml.representer import SafeRepresenter
from orcid_api_v3.rest import ApiException
from . import app, db, orcid_client, rq
from .models import (
AFFILIATION_TYPES,
Affiliation,
AffiliationRecord,
Delegate,
FundingInvitee,
FundingRecord,
Invitee,
Log,
MailLog,
MessageRecord,
NestedDict,
OrcidApiCall,
OrcidToken,
Organisation,
OrgInvitation,
OtherIdRecord,
PartialDate,
PeerReviewExternalId,
PeerReviewInvitee,
PeerReviewRecord,
PropertyRecord,
RecordInvitee,
ResourceRecord,
Role,
Task,
TaskType,
User,
UserInvitation,
UserOrg,
WorkInvitee,
WorkRecord,
get_val,
readup_file,
)
# Module logger: INFO level, echoed to the console via a stream handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

# Short-code synonym sets, one per ORCID affiliation section; presumably
# used to classify affiliation types found in uploaded records — confirm
# at the call sites.
EDU_CODES = {"student", "edu", "education"}
EMP_CODES = {"faculty", "staff", "emp", "employment"}
DST_CODES = {"distinction", "dist", "dst"}
INV_POS_CODES = {"invited-position", "position"}
QUA_CODES = {"qualification", "qua"}
MEM_CODES = {"membership", "mem"}
SER_CODES = {"service", "ser"}

# Deployment environment name and "external service provider" flag,
# cached from the application config at import time.
ENV = app.config.get("ENV")
EXTERNAL_SP = app.config.get("EXTERNAL_SP")
def get_next_url(endpoint=None):
    """Retrieve and sanitize the next/return URL for the current request.

    Looks at the "next"/"_next"/"url" query parameters, then the referrer,
    then *endpoint* (if given).  The URL is only returned when it is
    relative, targets this application's host or a 127.0.x address, is in
    the CSRF_DOMAINS whitelist, or matches a registered Delegate hostname;
    otherwise None is returned.
    """
    _next = (
        request.args.get("next")
        or request.args.get("_next")
        or request.args.get("url")
        or request.referrer
    )
    if not _next and endpoint:
        _next = url_for(endpoint)
    if _next:
        # Relative URLs are always safe to redirect to.
        if _next.startswith("/"):
            return _next
        try:
            csrf = urlparse(_next).netloc
            if (
                csrf == urlparse(app.config.get("APP_URL")).netloc
                or csrf.startswith("127.0.")
                or csrf in app.config.get("CSRF_DOMAINS")
            ):
                return _next
        # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
        # are not swallowed; config may legitimately be missing here.
        except Exception:
            pass
        try:
            if Delegate.select().where(Delegate.hostname ** f"%{urlparse(_next).netloc}%").exists():
                return _next
        except Exception:  # best-effort DB lookup; fall through to None
            pass
    return None
def is_valid_url(url):
    """Validate a URL: it must carry at least a scheme and a network location.

    Returns False (never raises) for malformed or non-string input.

    NOTE(review): the original also tested
    ``(result.path or result.path == "")`` — a tautology for any string
    path, so the path clause never filtered anything; it is dropped here
    without changing behaviour.
    """
    try:
        result = urlparse(url)
    # Was a bare `except:`; urlparse can raise ValueError, and non-string
    # input raises AttributeError/TypeError — treat all of these as invalid.
    except Exception:
        return False
    return bool(result.scheme and result.netloc)
def read_uploaded_file(form):
    """Read the uploaded file's whole content, decode it and return it.

    Returns None when the request carries no "file_" upload; raises
    ValueError when the content cannot be decoded.
    """
    if "file_" not in request.files:
        return None
    decoded = readup_file(request.files[form.file_.name])
    if not decoded:
        raise ValueError("Unable to decode encoding.")
    return decoded
def send_email(
    template,
    recipient,
    cc_email=None,
    sender=(app.config.get("APP_NAME"), app.config.get("MAIL_DEFAULT_SENDER")),
    reply_to=app.config.get("MAIL_SUPPORT_ADDRESS"),
    subject=None,
    base=None,
    logo=None,
    org=None,
    **kwargs,
):
    """Send an email, acquiring its payload by rendering a jinja2 template.
    :type template: :class:`str`
    :param subject: the subject of the email
    :param base: the base template of the email messages
    :param template: name of the template file in ``templates/emails`` to use
    :type recipient: :class:`tuple` (:class:`str`, :class:`str`)
    :param recipient: 'To' (name, email) or just an email address
    :type sender: :class:`tuple` (:class:`str`, :class:`str`)
    :param sender: 'From' (name, email)
    :param cc_email: optional CC address added to the message
    :param reply_to: optional Reply-To header value
    :param logo: URL of the logo image; resolved from the organisation or
        the bundled banner when not given
    :param org: organisation on which behalf the email is sent
    :raises Exception: when the DKIM key file is configured but missing, or
        when the SMTP send fails
    * `recipient` and `sender` are made available to the template as variables
    * In any email tuple, name may be ``None``
    * The subject is retrieved from a sufficiently-global template variable;
      typically set by placing something like
      ``{% set subject = "My Subject" %}``
      at the top of the template used (it may be inside some blocks
      (if, elif, ...) but not others (rewrap, block, ...).
      If it's not present, it defaults to "My Subject".
    * With regards to line lengths: :class:`email.mime.text.MIMEText` will
      (at least, in 2.7) encode the body of the text in base64 before sending
      it, text-wrapping the base64 data. You will therefore not have any
      problems with SMTP line length restrictions, and any concern to line
      lengths is purely aesthetic or to be nice to the MUA.
      :class:`RewrapExtension` may be used to wrap blocks of text nicely.
      Note that ``{{ variables }}`` in manually wrapped text can cause
      problems!
    """
    # Default the sending organisation to the current (non-anonymous)
    # user's organisation.
    if not org and current_user and not current_user.is_anonymous:
        org = current_user.organisation
    app = flask.current_app
    jinja_env = flask.current_app.jinja_env
    # Resolve the logo URL: the organisation's uploaded logo wins over the
    # bundled banner image.
    if logo is None:
        if org and org.logo:
            logo = url_for("logo_image", token=org.logo.token, _external=True)
        else:
            logo = url_for("static", filename="images/banner-small.png", _external=True)
    # Pick the outer (base) HTML template: a per-organisation override
    # first, then the application default.
    if not base and org:
        if org.email_template_enabled and org.email_template:
            base = org.email_template
    if not base:
        base = app.config.get("DEFAULT_EMAIL_TEMPLATE")
    jinja_env = jinja_env.overlay(autoescape=False)
    # `template` is either a template file name (single line ending in
    # ".html") or an inline template string.
    if "\n" not in template and template.endswith(".html"):
        template = jinja_env.get_template(template)
    else:
        template = Template(template)
    kwargs["sender"] = _jinja2_email(*sender)
    # Normalise a bare address into a (name, email) pair.
    if isinstance(recipient, str):
        recipient = (recipient, recipient)
    kwargs["recipient"] = _jinja2_email(*recipient)
    if subject is not None:
        kwargs["subject"] = subject
    rendered = template.make_module(vars=kwargs)
    # Fall back to the template's own `subject` variable, then a default.
    if subject is None:
        subject = getattr(rendered, "subject", "Welcome to the NZ ORCID Hub")
    html_msg = base.format(
        EMAIL=kwargs["recipient"]["email"],
        SUBJECT=subject,
        MESSAGE=str(rendered),
        LOGO=logo,
        BASE_URL=url_for("index", _external=True)[:-1],
        INCLUDED_URL=kwargs.get("invitation_url", "") or kwargs.get("include_url", ""),
    )
    plain_msg = html2text(html_msg)
    msg = emails.html(
        subject=subject,
        mail_from=(app.config.get("APP_NAME", "ORCID Hub"), app.config.get("MAIL_DEFAULT_SENDER")),
        html=html_msg,
        text=plain_msg,
    )
    # DKIM-sign the message when a key is configured; a configured-but-missing
    # key file is treated as a hard error.
    dkim_key_path = app.config.get("DKIM_KEY_PATH")
    dkim_domain = app.config.get("MAIL_DKIM_DOMAIN")
    dkim_selector = app.config.get("MAIL_DKIM_SELECTOR")
    if dkim_key_path and os.path.exists(dkim_key_path):
        with open(dkim_key_path) as key_file:
            msg.dkim(key=key_file, domain=dkim_domain, selector=dkim_selector)
    elif dkim_key_path:
        raise Exception(f"Cannot find DKIM key file: {dkim_key_path}!")
    if cc_email:
        msg.cc.append(cc_email)
    msg.mail_to.append(recipient)
    # Unsubscribe link:
    # the token is persisted in MailLog below so the link can be honoured.
    token = new_invitation_token(length=10)
    unsubscribe_url = url_for("unsubscribe", token=token, _external=True)
    headers = {
        "x-auto-response-suppress": "DR, RN, NRN, OOF",
        "auto-submitted": "auto-generated",
        "List-Unsubscribe": f"<{unsubscribe_url}>",
    }
    if reply_to:
        headers["reply-to"] = reply_to
    msg.set_headers(headers)
    # NOTE(review): MAIL_PORT is read unconditionally here and then again
    # conditionally just below — the second assignment is redundant (and the
    # first would raise KeyError if MAIL_PORT were absent); confirm intent.
    smtp = dict(host=app.config["MAIL_SERVER"], port=app.config["MAIL_PORT"])
    if "MAIL_PORT" in app.config:
        smtp["port"] = app.config["MAIL_PORT"]
    if "MAIL_USE_TLS" in app.config:
        smtp["tls"] = app.config["MAIL_USE_TLS"]
    if "MAIL_USERNAME" in app.config:
        smtp["user"] = app.config["MAIL_USERNAME"]
    if "MAIL_PASSWORD" in app.config:
        smtp["password"] = app.config["MAIL_PASSWORD"]
    resp = msg.send(smtp=smtp)
    # Record the attempt (success or failure) for auditing/unsubscribe.
    MailLog.create(
        org=org,
        recipient=recipient[1],
        sender=sender[1],
        subject=subject,
        was_sent_successfully=resp.success,
        error=resp.error,
        token=token,
    )
    if not resp.success:
        raise Exception(
            f"Failed to email the message: {resp.error}. Please contact a Hub administrator!"
        )
def new_invitation_token(length=5):
    """Generate a unique invitation token.

    The token gates invitation and unsubscribe links, so it is generated
    with the cryptographic RNG from :mod:`secrets` rather than
    :mod:`random`.  Uniqueness is enforced against the UserInvitation,
    OrgInvitation and MailLog tables.

    :param length: number of alphanumeric characters in the token
    :return: a token string not yet present in any of the three tables
    """
    import secrets  # local import keeps this fix self-contained

    alphabet = string.ascii_letters + string.digits
    while True:
        token = "".join(secrets.choice(alphabet) for _ in range(length))
        if not (
            UserInvitation.select().where(UserInvitation.token == token).exists()
            or OrgInvitation.select().where(OrgInvitation.token == token).exists()
            or MailLog.select().where(MailLog.token == token).exists()
        ):
            return token
def append_qs(url, **qs):
    """Append extra query-string parameters to an arbitrary URL."""
    # Use '&' when the URL already carries a query string, '?' otherwise.
    separator = "&" if urlparse(url).query else "?"
    return url + separator + urlencode(qs, doseq=True)
def track_event(category, action, label=None, value=0):
    """Report an application event to Google Analytics.

    Does nothing (returns None) when no GA_TRACKING_ID is configured.
    Raises a requests error when the collect endpoint rejects the hit;
    otherwise returns the HTTP response (also used by tests).
    """
    tracking_id = app.config.get("GA_TRACKING_ID")
    if not tracking_id:
        return None
    payload = {
        "v": "1",  # API Version.
        "tid": tracking_id,  # Tracking ID / Property ID.
        # Anonymous Client Identifier. Ideally, this should be a UUID that
        # is associated with particular user, device, or browser instance.
        "cid": current_user.uuid,
        "t": "event",  # Event hit type.
        "ec": category,  # Event category.
        "ea": action,  # Event action.
        "el": label,  # Event label.
        "ev": value,  # Event value, must be an integer
    }
    response = requests.post("http://www.google-analytics.com/collect", data=payload)
    # If the request fails, this will raise a RequestException. Depending
    # on the application's needs, this may be a non-error and can be caught
    # by the caller.
    response.raise_for_status()
    return response
def set_server_name():
    """Ensure app.config["SERVER_NAME"] is set for batch (request-less) jobs."""
    if app.config.get("SERVER_NAME"):
        return
    if EXTERNAL_SP:
        name = "127.0.0.1:5000"
    elif ENV == "prod":
        name = "orcidhub.org.nz"
    else:
        name = ENV + ".orcidhub.org.nz"
    app.config["SERVER_NAME"] = name
def is_org_rec(org, rec):
    """Test if the record was authoritized by the organisation.

    Compares the record's source client id path with the organisation's
    ORCID client id.  Unlike the original, a record whose "source" entry
    is missing or None is treated as not belonging to the organisation
    instead of raising AttributeError.
    """
    client_id = org.orcid_client_id
    source = rec.get("source")
    source_client_id = source.get("source-client-id") if source else None
    return bool(source_client_id and source_client_id.get("path") == client_id)
def create_or_update_work(user, org_id, records, *args, **kwargs):
    """Create or update work record of a user.

    :param user: the user whose ORCID profile is updated
    :param org_id: id of the organisation on whose behalf the API is called
    :param records: task-by-user entries, each carrying a work record and
        its invitee; de-duplicated by record id below
    :return: None; progress is written back to the record/invitee rows.
    """
    # Process each distinct work record only once.
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(id=org_id)
    api = orcid_client.MemberAPIV3(org, user)
    profile_record = api.get_record()
    if profile_record:
        activities = profile_record.get("activities-summary")
        # Collect the profile's existing work summaries that were written
        # by this organisation's client id.
        works = []
        for r in activities.get("works").get("group"):
            ws = r.get("work-summary")[0]
            if is_org_rec(org, ws):
                works.append(ws)
        # Put-codes already claimed by an invitee must not be re-assigned.
        taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code}

        def match_put_code(records, record, invitee):
            """Match and assign put-code to a single work record and the existing ORCID records."""
            if invitee.put_code:
                return
            for r in records:
                put_code = r.get("put-code")
                if put_code in taken_put_codes:
                    continue
                # Heuristic match: case-insensitive equality of title and type.
                if (
                    record.title
                    and record.type
                    and (r.get("title", "title", "value", default="") or "").lower()
                    == record.title.lower()
                    and (r.get("type", default="") or "").lower() == record.type.lower()
                ):
                    invitee.put_code = put_code
                    invitee.visibility = r.get("visibility")
                    invitee.save()
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the work record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break

        # First pass: pair each record/invitee with an existing ORCID work.
        for task_by_user in records:
            wr = task_by_user.record
            wi = task_by_user.record.invitee
            match_put_code(works, wr, wi)
        # Second pass: push each record to ORCID and record the outcome.
        # NOTE(review): `wr` inside this loop is still bound to the LAST
        # record of the first pass; presumably each iteration should rebind
        # wr = task_by_user.record — confirm against upstream before the
        # error status/save below is trusted per-record.
        for task_by_user in records:
            wi = task_by_user.record.invitee
            try:
                put_code, orcid, created, visibility = api.create_or_update_work(task_by_user)
                if created:
                    wi.add_status_line("Work record was created.")
                else:
                    wi.add_status_line("Work record was updated.")
                wi.orcid = orcid
                wi.put_code = put_code
                if wi.visibility != visibility:
                    wi.visibility = visibility
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                # ApiException carries a JSON body; anything else is stringified.
                exception_msg = json.loads(ex.body) if hasattr(ex, "body") else str(ex)
                wi.add_status_line(f"Exception occured processing the record: {exception_msg}.")
                wr.add_status_line(
                    f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}."
                )
            finally:
                wi.processed_at = datetime.utcnow()
                wr.save()
                wi.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
    return
def create_or_update_peer_review(user, org_id, records, *args, **kwargs):
    """Create or update peer review record of a user.

    Matches the task records against the peer review summaries already present
    in the user's ORCID profile (re-using put-codes of records sourced by this
    organisation), then creates or updates each record via the member API.

    :param user: user whose ORCID profile is updated.
    :param org_id: ID of the organisation on whose behalf the update is made.
    :param records: iterable of task rows (each carrying ``record`` with an
        ``invitee``) belonging to a single user profile.
    """
    # De-duplicate the rows by record ID, preserving order:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(id=org_id)
    api = orcid_client.MemberAPIV3(org, user)
    profile_record = api.get_record()
    if profile_record:
        # Flatten the nested summary groups, keeping only the summaries
        # sourced by this organisation:
        peer_reviews = [
            s
            for ag in profile_record.get("activities-summary", "peer-reviews", "group", default=[])
            for pg in ag.get("peer-review-group", default=[])
            for s in pg.get("peer-review-summary", default=[])
            if is_org_rec(org, s)
        ]
        # Put-codes already attached to an invitee must not be matched again:
        taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code}
        def match_put_code(records, record, invitee, taken_external_id_values):
            """Match and assign put-code to a single peer review record and the existing ORCID records."""
            if invitee.put_code:
                return
            for r in records:
                put_code = r.get("put-code")
                # The value of the first external ID of the ORCID summary (if any):
                external_id_value = (
                    r.get("external-ids").get("external-id")[0].get("external-id-value")
                    if r.get("external-ids")
                    and r.get("external-ids").get("external-id")
                    and r.get("external-ids").get("external-id")[0].get("external-id-value")
                    else None
                )
                if put_code in taken_put_codes:
                    continue
                # Match on the review group ID plus an external ID value that
                # also occurs among the task record's external IDs:
                if (
                    record.review_group_id
                    and external_id_value in taken_external_id_values
                    and (r.get("review-group-id", default="") or "").lower()
                    == record.review_group_id.lower()
                ):  # noqa: E127
                    invitee.put_code = put_code
                    invitee.save()
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the peer review record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break
        # First pass: assign put-codes using the external ID values of each record:
        for task_by_user in records:
            pr = task_by_user.record
            pi = pr.invitee
            external_ids = PeerReviewExternalId.select().where(
                PeerReviewExternalId.record_id == pr.id
            )
            taken_external_id_values = {ei.value for ei in external_ids if ei.value}
            match_put_code(peer_reviews, pr, pi, taken_external_id_values)
        # Second pass: push every record to ORCID and track the outcome:
        for task_by_user in records:
            pr = task_by_user.record
            pi = pr.invitee
            try:
                put_code, orcid, created, visibility = api.create_or_update_peer_review(
                    task_by_user
                )
                if created:
                    pi.add_status_line("Peer review record was created.")
                else:
                    pi.add_status_line("Peer review record was updated.")
                pi.orcid = orcid
                pi.put_code = put_code
                if pi.visibility != visibility:
                    pi.visibility = visibility
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                exception_msg = json.loads(ex.body) if hasattr(ex, "body") else str(ex)
                pi.add_status_line(f"Exception occured processing the record: {exception_msg}.")
                pr.add_status_line(
                    f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}."
                )
            finally:
                # Mark the invitee as processed even on failure:
                pi.processed_at = datetime.utcnow()
                pr.save()
                pi.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
        return
def create_or_update_funding(user, org_id, records, *args, **kwargs):
    """Create or update funding record of a user.

    Matches the task records against the funding summaries already present in
    the user's ORCID profile (re-using put-codes of records sourced by this
    organisation), then creates or updates each record via the member API.

    :param user: user whose ORCID profile is updated.
    :param org_id: ID of the organisation on whose behalf the update is made.
    :param records: iterable of task rows (each carrying ``record`` with an
        ``invitee``) belonging to a single user profile.
    """
    # De-duplicate the rows by record ID, preserving order:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    # Fetch by the "id" field explicitly, consistent with the other handlers
    # (previously the raw value was passed positionally):
    org = Organisation.get(id=org_id)
    api = orcid_client.MemberAPIV3(org, user)
    profile_record = api.get_record()
    if profile_record:
        activities = profile_record.get("activities-summary")
        # Keep only the funding summaries that were sourced by this organisation:
        fundings = []
        for r in activities.get("fundings").get("group"):
            fs = r.get("funding-summary")[0]
            if is_org_rec(org, fs):
                fundings.append(fs)
        # Put-codes already attached to an invitee must not be matched again:
        taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code}

        def match_put_code(records, record, invitee):
            """Match and assign put-code to a single funding record and the existing ORCID records."""
            if invitee.put_code:
                return
            for r in records:
                put_code = r.get("put-code")
                if put_code in taken_put_codes:
                    continue
                # A funding matches when the title, the type and the funding
                # organisation name all coincide (case-insensitively):
                if (
                    record.title
                    and record.type
                    and record.org_name
                    and (r.get("title", "title", "value", default="") or "").lower()
                    == record.title.lower()
                    and (r.get("type", default="") or "").lower() == record.type.lower()
                    and (r.get("organization", "name", default="") or "").lower()
                    == record.org_name.lower()
                ):
                    invitee.put_code = put_code
                    invitee.visibility = r.get("visibility")
                    invitee.save()
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the funding record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break

        for task_by_user in records:
            fr = task_by_user.record
            fi = task_by_user.record.invitee
            match_put_code(fundings, fr, fi)
        for task_by_user in records:
            # NB! Re-bind BOTH the record and the invitee. Previously only the
            # invitee was re-bound here, so on exception the error status line
            # and the ``fr.save()`` in ``finally`` were applied to whichever
            # record was left over from the matching loop above.
            fr = task_by_user.record
            fi = task_by_user.record.invitee
            try:
                put_code, orcid, created, visibility = api.create_or_update_funding(task_by_user)
                if created:
                    fi.add_status_line("Funding record was created.")
                else:
                    fi.add_status_line("Funding record was updated.")
                fi.orcid = orcid
                fi.put_code = put_code
                if fi.visibility != visibility:
                    fi.visibility = visibility
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                if ex and hasattr(ex, "body"):
                    exception_msg = json.loads(ex.body)
                else:
                    exception_msg = str(ex)
                fi.add_status_line(f"Exception occured processing the record: {exception_msg}.")
                fr.add_status_line(
                    f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}."
                )
            finally:
                fi.processed_at = datetime.utcnow()
                fr.save()
                fi.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
        return
def create_or_update_resources(user, org_id, records, *args, **kwargs):
    """Create or update research resource record of a user.

    Matches the task records against the research resource groups already in
    the user's ORCID profile (re-using put-codes of summaries sourced by this
    organisation), then PUTs or POSTs each record via the member API.

    :param user: user whose ORCID profile is updated.
    :param org_id: ID of the organisation on whose behalf the update is made.
    :param records: iterable of task rows for a single user profile.
    """
    # De-duplicate the rows by record ID, preserving order:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(org_id)
    token = (
        OrcidToken.select(OrcidToken.access_token)
        .where(
            OrcidToken.user_id == user.id,
            OrcidToken.org_id == org.id,
            OrcidToken.scopes.contains("/activities/update"),
        )
        .first()
    )
    # NOTE(review): unlike the sibling handlers there is no ``if token:`` guard,
    # so ``token.access_token`` raises AttributeError when no token exists —
    # presumably callers only dispatch here with a token present; verify.
    api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token)
    resources = api.get_resources()
    if resources:
        resources = resources.get("group")
        # Keep only the groups that contain at least one summary sourced by
        # this organisation:
        resources = [
            r
            for r in resources
            if any(
                rr.get("source", "source-client-id", "path") == org.orcid_client_id
                for rr in r.get("research-resource-summary")
            )
        ]
        # Put-codes already attached to a record must not be matched again:
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}
        def match_record(records, record):
            """Match and assign put-code to the existing ORCID records.

            Returns the matched (or already assigned) put-code, or ``None``
            when no existing ORCID record matches.
            """
            if record.put_code:
                return record.put_code
            for r in records:
                # Skip groups whose external IDs do not contain the record's
                # proposal external ID value:
                if all(
                    eid.get("external-id-value") != record.proposal_external_id_value
                    for eid in r.get("external-ids", "external-id")
                ):
                    continue
                for rr in r.get("research-resource-summary"):
                    put_code = rr.get("put-code")
                    # if all(eid.get("external-id-value") != record.external_id_value
                    #        for eid in rr.get("proposal", "external-ids", "external-id")):
                    #     continue
                    if put_code in taken_put_codes:
                        continue
                    record.put_code = put_code
                    if not record.visibility:
                        record.visibility = r.get("visibility")
                    if not record.display_index:
                        record.display_index = r.get("display-index")
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the other id record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    return put_code
        for t in records:
            try:
                rr = t.record
                put_code = match_record(resources, rr)
                # PUT updates an existing record; POST creates a new one:
                if put_code:
                    resp = api.put(f"research-resource/{put_code}", rr.orcid_research_resource)
                else:
                    resp = api.post("research-resource", rr.orcid_research_resource)
                # 201 == created; the new ORCID iD and put-code come back in
                # the Location header (".../{orcid}/research-resource/{put-code}"):
                if resp.status == 201:
                    orcid, put_code = resp.headers["Location"].split("/")[-3::2]
                    rr.add_status_line("ORCID record was created.")
                else:
                    orcid = user.orcid
                    rr.add_status_line("ORCID record was updated.")
                if not rr.put_code and put_code:
                    rr.put_code = int(put_code)
                if not rr.orcid and orcid:
                    rr.orcid = orcid
                visibility = (
                    json.loads(resp.data).get("visibility")
                    if hasattr(resp, "data") and resp.data
                    else None
                )
                if rr.visibility != visibility:
                    rr.visibility = visibility
            except ApiException as ex:
                # 404: the stored put-code no longer exists; 401: the token
                # was revoked, so drop it:
                if ex.status == 404:
                    rr.put_code = None
                elif ex.status == 401:
                    token.delete_instance()
                logger.exception(f"Exception occured {ex}")
                rr.add_status_line(f"ApiException: {ex}")
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                rr.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                # Mark the record as processed even on failure:
                rr.processed_at = datetime.utcnow()
                rr.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
        return
def create_or_update_record_from_messages(records, *args, **kwargs):
    """Create or update ORCID record of a user from the given message.

    Each record carries a raw JSON message (``record.message``) that is pushed
    to the ORCID research resource endpoint after matching it against the
    existing organisation-sourced resources by external ID values.

    :param records: iterator with records for a single profile.
    """
    # De-duplicate the rows by record ID, preserving order:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    if not records:
        return
    # add a few shortcuts:
    for r in records:
        r.record.msg = json.loads(r.record.message, object_pairs_hook=NestedDict)
        r.record.invitee = r.record.ri.invitee
    rec0 = records[0]
    org = rec0.org
    user = rec0.record.invitee.user
    token = user.token
    api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token)
    resources = api.get_resources()
    if resources:
        resources = resources.get("group")
        # Keep only the groups that contain at least one summary sourced by
        # this organisation:
        resources = [
            r
            for r in resources
            if any(
                rr.get("source", "source-client-id", "path") == org.orcid_client_id
                for rr in r.get("research-resource-summary")
            )
        ]
        # Put-codes already attached to an invitee must not be matched again:
        taken_put_codes = {
            r.record.ri.invitee.put_code for r in records if r.record.ri.invitee.put_code
        }
        def match_record(resource, record):
            """Match and assign put-code to the existing ORCID records.

            Returns the matched (or already assigned) put-code, or ``None``
            when no existing ORCID record matches.
            """
            put_code = record.invitee.put_code
            if put_code:
                # The put-code is already known — just refresh the visibility:
                for rr in (rr for r in resources for rr in r.get("research-resource-summary")):
                    if rr.get("put-code") == put_code:
                        record.invitee.visibility = rr.get("visibility")
                        break
                return put_code
            external_ids = record.msg.get("proposal", "external-ids", "external-id", default=[])
            for r in resource:
                res_external_ids = r.get("external-ids", "external-id")
                # Skip groups that share no external ID value with the message:
                if all(
                    eid.get("external-id-value") != rec_eid.get("external-id-value")
                    for eid in res_external_ids
                    for rec_eid in external_ids
                ):
                    continue
                for rr in r.get("research-resource-summary"):
                    proposal_external_ids = record.msg.get(
                        "proposal", "external-ids", "external-id"
                    )
                    # Skip summaries whose proposal external IDs do not overlap
                    # with the message's proposal external IDs:
                    if all(
                        eid.get("external-id-value") != rec_eid.get("external-id-value")
                        for rec_eid in proposal_external_ids
                        for eid in rr.get("proposal", "external-ids", "external-id")
                    ):
                        continue
                    put_code = rr.get("put-code")
                    if put_code in taken_put_codes:
                        continue
                    record.invitee.put_code = put_code
                    record.ri.invitee.visibility = rr.get("visibility")
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    return put_code
        for t in records:
            try:
                rr = t.record
                if not rr.invitee.orcid:
                    rr.invitee.orcid = user.orcid
                put_code = match_record(resources, rr)
                # Visibility cannot be set via the message payload:
                if "visibility" in rr.msg:
                    del rr.msg["visibility"]
                # PUT updates an existing record; POST creates a new one:
                if put_code:
                    rr.msg["put-code"] = put_code
                    resp = api.put(f"research-resource/{put_code}", rr.msg)
                else:
                    resp = api.post("research-resource", rr.msg)
                # 201 == created
                if resp.status == 201:
                    rr.invitee.add_status_line("ORCID record was created.")
                else:
                    rr.invitee.add_status_line("ORCID record was updated.")
                if not put_code:
                    # A newly created record: the put-code is the tail of the
                    # Location header; fetch the record for its visibility:
                    location = resp.headers["Location"]
                    rr.invitee.put_code = location.split("/")[-1]
                    rec = api.get(location)
                    rr.invitee.visibility = rec.json.get("visibility")
            except ApiException as ex:
                # 404: the stored put-code no longer exists; 401: the token
                # was revoked, so drop it:
                if ex.status == 404:
                    rr.invitee.put_code = None
                elif ex.status == 401:
                    token.delete_instance()
                logger.exception(f"Exception occured {ex}")
                rr.invitee.add_status_line(f"ApiException: {ex}")
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                rr.invitee.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                # Mark the invitee as processed even on failure:
                rr.invitee.processed_at = datetime.utcnow()
                rr.invitee.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
        return
@rq.job(timeout=300)
def send_user_invitation(
    inviter,
    org,
    email=None,
    first_name=None,
    last_name=None,
    user=None,
    task_id=None,
    task_type=None,
    affiliation_types=None,
    orcid=None,
    department=None,
    organisation=None,
    city=None,
    region=None,
    country=None,
    course_or_role=None,
    start_date=None,
    end_date=None,
    affiliations=Affiliation.NONE,
    disambiguated_id=None,
    disambiguation_source=None,
    cc_email=None,
    invitation_template=None,
    **kwargs,
):
    """Send an invitation to join ORCID Hub logging in via ORCID.

    Creates (or updates) the invited :class:`User` and its :class:`UserOrg`
    link, sends the invitation e-mail with a one-time token, records a
    :class:`UserInvitation` row and updates the status of the originating
    task records.

    :param inviter: the user sending the invitation (or their ID).
    :param org: the organisation on whose behalf the invite is sent (or its ID).
    :param email: invitee e-mail; falls back to ``user.email``.
    :param user: existing user object, if known.
    :param task_id: ID of the task the invitation originates from.
    :param task_type: :class:`TaskType`; defaults to the task's type or AFFILIATION.
    :param affiliation_types: raw affiliation type codes used to derive ``affiliations``.
    :param invitation_template: e-mail template; derived from the task type if not given.
    :return: the created :class:`UserInvitation`.
    :raises Exception: when no e-mail address can be determined, or re-raises
        any failure after logging it.
    """
    try:
        if not email:
            if user and user.email:
                email = user.email
            else:
                raise Exception(
                    "Failed to find the email address for the record. Cannot send an invitation."
                )
        else:
            email = email.lower()
        # The task queue may pass model objects by their primary keys:
        if isinstance(inviter, int):
            inviter = User.get(id=inviter)
        if isinstance(org, int):
            org = Organisation.get(id=org)
        # Partial dates are serialised as lists for the queue:
        if isinstance(start_date, list):
            start_date = PartialDate(*start_date)
        if isinstance(end_date, list):
            end_date = PartialDate(*end_date)
        set_server_name()
        task_type = task_type or (Task.get(task_id).task_type if task_id else TaskType.AFFILIATION)
        if not invitation_template:
            if task_type != TaskType.AFFILIATION:
                invitation_template = f"email/{task_type.name.lower()}_invitation.html"
            else:
                invitation_template = "email/researcher_invitation.html"
        if task_type == TaskType.AFFILIATION:
            logger.info(
                f"*** Sending an invitation to '{first_name} {last_name} <{email}>' "
                f"submitted by {inviter} of {org} for affiliations: {affiliation_types}"
            )
        else:
            logger.info(
                f"*** Sending an invitation to '{first_name} <{email}>' "
                f"submitted by {inviter} of {org}"
            )
        email = email.lower()
        if not user or not user.id:
            user, user_created = User.get_or_create(email=email)
            if user_created:
                user.organisation = org
                user.created_by = inviter.id
                user.first_name = first_name or "N/A"
                user.last_name = last_name or "N/A"
            else:
                user.updated_by = inviter.id
        # Fill in the missing names from whichever side has them:
        if first_name and not user.first_name:
            user.first_name = first_name
        if last_name and not user.last_name:
            user.last_name = last_name
        if not first_name:
            first_name = user.first_name
        if not last_name:
            last_name = user.last_name
        user.roles |= Role.RESEARCHER
        token = new_invitation_token()
        # An application context is needed for URL generation and mailing:
        with app.app_context():
            invitation_url = flask.url_for(
                "orcid_login",
                invitation_token=token,
                _external=True,
                _scheme="http" if app.debug else "https",
            )
            send_email(
                invitation_template,
                recipient=(org.name if org else user.organisation.name, user.email),
                reply_to=f"{inviter.name} <{inviter.email}>",
                cc_email=cc_email,
                invitation_url=invitation_url,
                org_name=org.name if org else user.organisation.name,
                org=org,
                user=user,
            )
        user.save()
        user_org, user_org_created = UserOrg.get_or_create(user=user, org=org)
        if user_org_created:
            user_org.created_by = inviter.id
            # Derive the affiliation flags from the raw type codes:
            if not affiliations and affiliation_types:
                if affiliation_types & EMP_CODES:
                    affiliations |= Affiliation.EMP
                if affiliation_types & EDU_CODES:
                    affiliations |= Affiliation.EDU
            user_org.affiliations = affiliations
        else:
            user_org.updated_by = inviter.id
        user_org.save()
        ui = UserInvitation.create(
            task_id=task_id,
            invitee_id=user.id,
            inviter_id=inviter.id,
            org=org,
            email=email,
            first_name=first_name,
            last_name=last_name,
            orcid=orcid,
            department=department,
            organisation=organisation,
            city=city,
            region=region,
            country=country,
            course_or_role=course_or_role,
            start_date=start_date,
            end_date=end_date,
            affiliations=affiliations,
            disambiguated_id=disambiguated_id,
            disambiguation_source=disambiguation_source,
            token=token,
        )
        status = "The invitation sent at " + datetime.utcnow().isoformat(timespec="seconds")
        if task_type == TaskType.AFFILIATION:
            # Append to the status of records that already have one:
            (
                AffiliationRecord.update(status=AffiliationRecord.status + "\n" + status)
                .where(
                    AffiliationRecord.status.is_null(False),
                    AffiliationRecord.task_id == task_id,
                    AffiliationRecord.email == email,
                )
                .execute()
            )
            # And set the status of records without one:
            (
                AffiliationRecord.update(status=status)
                .where(
                    AffiliationRecord.task_id == task_id,
                    AffiliationRecord.status.is_null(),
                    AffiliationRecord.email == email,
                )
                .execute()
            )
        elif task_type in [TaskType.FUNDING, TaskType.WORK, TaskType.PEER_REVIEW]:
            task = Task.get(task_id)
            for record in task.records.where(task.record_model.is_active):
                invitee_class = record.invitees.model
                invitee_class.update(status=status).where(
                    invitee_class.record == record.id, invitee_class.email == email
                ).execute()
        return ui
    except Exception as ex:
        logger.exception(f"Exception occured while sending mail {ex}")
        raise
def unique_everseen(iterable, key=None):
    """List unique elements, preserving order. Remember all elements ever seen.

    The snippet is taken from https://docs.python.org/3.6/library/itertools.html#itertools-recipes

    NB! The doctest examples below were fixed: the previous ones showed
    ``A B C D`` as output of a bare generator call, which is not valid
    doctest output and would fail under doctest collection.

    >>> list(unique_everseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'D']
    """
    seen = set()
    seen_add = seen.add
    if key is None:
        # Fast path: filter out already-seen elements at C speed.
        for element in filterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element
def create_or_update_properties(user, org_id, records, *args, **kwargs):
    """Create or update researcher property records of a user.

    Handles four property kinds (distinguished by ``record.type``):
    "URL" (researcher URLs), "NAME" (other names), "COUNTRY" (addresses)
    and keywords. Existing organisation-sourced entries are matched first to
    re-use their put-codes, then each record is pushed via the member API.

    :param user: user whose ORCID profile is updated.
    :param org_id: ID of the organisation on whose behalf the update is made.
    :param records: iterable of task rows for a single user profile.
    """
    # De-duplicate the rows by record ID, preserving order:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(org_id)
    profile_record = None
    token = (
        OrcidToken.select(OrcidToken.access_token)
        .where(
            OrcidToken.user_id == user.id,
            OrcidToken.org_id == org.id,
            OrcidToken.scopes.contains("/person/update"),
        )
        .first()
    )
    if token:
        api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token)
        profile_record = api.get_record()
    if profile_record:
        activities = profile_record.get("person")
        # Existing organisation-sourced entries, bucketed by property kind:
        researcher_urls = [
            r
            for r in (activities.get("researcher-urls", "researcher-url", default=[]))
            if is_org_rec(org, r)
        ]
        other_names = [
            r
            for r in (activities.get("other-names", "other-name", default=[]))
            if is_org_rec(org, r)
        ]
        keywords = [
            r for r in (activities.get("keywords", "keyword", default=[])) if is_org_rec(org, r)
        ]
        countries = [
            r for r in (activities.get("addresses", "address", default=[])) if is_org_rec(org, r)
        ]
        # Put-codes already attached to a record must not be matched again:
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}
        def match_put_code(record):
            """Match and assign put-code to the existing ORCID records."""
            # Select the candidate list matching the record's property kind:
            for r in (
                researcher_urls
                if record.type == "URL"
                else other_names
                if record.type == "NAME"
                else countries
                if record.type == "COUNTRY"
                else keywords
            ):
                try:
                    # The path has the form ".../{orcid}/{kind}/{put-code}":
                    orcid, put_code = r.get("path").split("/")[-3::2]
                except Exception:
                    app.logger.exception("Failed to get ORCID iD/put-code from the response.")
                    raise Exception("Failed to get ORCID iD/put-code from the response.")
                if record.put_code:
                    return
                if put_code in taken_put_codes:
                    continue
                # Kind-specific, case-insensitive value comparison:
                if (
                    record.value
                    and (
                        record.name
                        and record.type == "URL"
                        and (r.get("url-name", default="") or "").lower() == record.name.lower()
                        and (r.get("url", "value", default="") or "").lower()
                        == record.value.lower()
                    )
                    or (
                        record.type in ["NAME", "KEYWORD"]
                        and (r.get("content", default="") or "").lower() == record.value.lower()
                    )
                    or (
                        record.type == "COUNTRY"
                        and (r.get("country", "value", default="") or "").lower()
                        == record.value.lower()
                    )
                ):  # noqa: E129
                    record.put_code = put_code
                    record.orcid = orcid
                    record.visibility = r.get("visibility")
                    if not record.display_index:
                        record.display_index = r.get("display-index")
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the record (ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break
        for rr in (t.record for t in records):
            try:
                match_put_code(rr)
                # Dispatch to the API call matching the record's property kind:
                if rr.type == "URL":
                    put_code, orcid, created, visibility = api.create_or_update_researcher_url(
                        **rr.__data__
                    )
                elif rr.type == "NAME":
                    put_code, orcid, created, visibility = api.create_or_update_other_name(
                        **rr.__data__
                    )
                elif rr.type == "COUNTRY":
                    put_code, orcid, created, visibility = api.create_or_update_address(
                        **rr.__data__
                    )
                else:
                    put_code, orcid, created, visibility = api.create_or_update_keyword(
                        **rr.__data__
                    )
                if created:
                    rr.add_status_line(f"Researcher {rr.type} record was created.")
                else:
                    rr.add_status_line(f"Researcher {rr.type} record was updated.")
                rr.orcid = orcid
                rr.put_code = put_code
                if rr.visibility != visibility:
                    rr.visibility = visibility
            except ApiException as ex:
                # 404: the stored put-code no longer exists; 401: the token
                # was revoked, so drop it:
                if ex.status == 404:
                    rr.put_code = None
                elif ex.status == 401:
                    token.delete_instance()
                logger.exception(f"Exception occured {ex}")
                rr.add_status_line(f"ApiException: {ex}")
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                rr.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                # Mark the record as processed even on failure:
                rr.processed_at = datetime.utcnow()
                rr.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
        return
# TODO: delete
def create_or_update_other_id(user, org_id, records, *args, **kwargs):
    """Create or update Other Id record of a user.

    Matches the task records against the external identifiers already present
    in the user's ORCID profile (re-using put-codes of entries sourced by this
    organisation), then creates or updates each record via the member API.

    :param user: user whose ORCID profile is updated.
    :param org_id: ID of the organisation on whose behalf the update is made.
    :param records: iterable of task rows for a single user profile.
    """
    # De-duplicate the rows by record ID, preserving order:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(id=org_id)
    profile_record = None
    token = (
        OrcidToken.select(OrcidToken.access_token)
        .where(
            OrcidToken.user_id == user.id,
            OrcidToken.org_id == org.id,
            OrcidToken.scopes.contains("/person/update"),
        )
        .first()
    )
    if token:
        api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token)
        profile_record = api.get_record()
    if profile_record:
        activities = profile_record.get("person")
        # Keep only the external identifiers sourced by this organisation:
        other_id_records = [
            r
            for r in (activities.get("external-identifiers").get("external-identifier"))
            if is_org_rec(org, r)
        ]
        # Put-codes already attached to a record must not be matched again:
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}
        def match_put_code(records, record):
            """Match and assign put-code to the existing ORCID records."""
            for r in records:
                try:
                    # The path has the form ".../{orcid}/external-identifiers/{put-code}":
                    orcid, put_code = r.get("path").split("/")[-3::2]
                except Exception:
                    app.logger.exception("Failed to get ORCID iD/put-code from the response.")
                    raise Exception("Failed to get ORCID iD/put-code from the response.")
                if record.put_code:
                    return
                if put_code in taken_put_codes:
                    continue
                # Match on both the external ID type and value (case-insensitively):
                if (
                    record.type
                    and record.value
                    and (r.get("external-id-type", default="") or "").lower()
                    == record.type.lower()
                    and (r.get("external-id-value", default="") or "").lower()
                    == record.value.lower()
                ):
                    record.put_code = put_code
                    record.orcid = orcid
                    record.visibility = r.get("visibility")
                    if not record.display_index:
                        record.display_index = r.get("display-index")
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the other id record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break
        for task_by_user in records:
            try:
                rr = task_by_user.record
                match_put_code(other_id_records, rr)
                put_code, orcid, created, visibility = api.create_or_update_person_external_id(
                    **rr.__data__
                )
                if created:
                    rr.add_status_line("Other ID record was created.")
                else:
                    rr.add_status_line("Other ID record was updated.")
                rr.orcid = orcid
                rr.put_code = put_code
                if rr.visibility != visibility:
                    rr.visibility = visibility
            except ApiException as ex:
                # 404: the stored put-code no longer exists; 401: the token
                # was revoked, so drop it:
                if ex.status == 404:
                    rr.put_code = None
                elif ex.status == 401:
                    token.delete_instance()
                logger.exception(f"Exception occured {ex}")
                rr.add_status_line(f"ApiException: {ex}")
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                rr.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                # Mark the record as processed even on failure:
                rr.processed_at = datetime.utcnow()
                rr.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
        return
def create_or_update_affiliations(user, org_id, records, *args, **kwargs):
    """Create or update affiliation record of a user.

    1. Retrieves the user's affiliation summaries (employment, education, etc.)
       from ORCID;
    2. Matches the task records with the summaries;
    3. If there is a full match, the record is left unchanged; a partial match
       re-uses the existing put-code for an update;
    4. If there is no match, a new ORCID record is created.

    When the profile cannot be retrieved at all, an invitation is re-sent to
    the researcher asking for permissions.

    :param user: user whose ORCID profile is updated.
    :param org_id: ID of the organisation on whose behalf the update is made.
    :param records: iterable of task rows for a single user profile.
    """
    # De-duplicate the rows by record ID, preserving order:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(id=org_id)
    api = orcid_client.MemberAPIV3(org, user)
    profile_record = api.get_record()
    orcid_affiliation_types = [
        "employment",
        "education",
        "distinction",
        "membership",
        "service",
        "qualification",
        "invited-position",
    ]
    if profile_record:
        # Organisation-sourced affiliation summaries, bucketed by their type:
        affiliations = {
            at: [
                s.get(f"{at}-summary")
                for ag in profile_record.get(
                    "activities-summary", f"{at}s", "affiliation-group", default=[]
                )
                for s in ag.get("summaries", default=[])
                if is_org_rec(org, s.get(f"{at}-summary"))
            ]
            for at in orcid_affiliation_types
        }
        # Put-codes already attached to a record must not be matched again:
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}
        # All existing put-codes per affiliation type (used for deletions):
        put_codes = {at: [e["put-code"] for e in records] for at, records in affiliations.items()}

        def match_put_code(records, record):
            """Match and assign put-code to a single affiliation record and the existing ORCID records.

            Returns True when an existing ORCID record is identical to the
            task record, i.e. no API call is needed at all.
            """
            for r in records:
                try:
                    # The path has the form ".../{orcid}/{type}/{put-code}":
                    orcid, rec_type, put_code = r.get("path").split("/")[-3:]
                except Exception:
                    app.logger.exception("Failed to get ORCID iD/put-code from the response.")
                    raise Exception("Failed to get ORCID iD/put-code from the response.")
                start_date = record.start_date.as_orcid_dict() if record.start_date else None
                end_date = record.end_date.as_orcid_dict() if record.end_date else None
                # Full match — every field coincides, nothing to push:
                if (
                    r.get("start-date") == start_date
                    and r.get("end-date") == end_date
                    and (rec_type == "education" or r.get("department-name") == record.department)
                    and r.get("role-title") == record.role
                    and get_val(r, "organization", "name") == record.organisation
                    and get_val(r, "organization", "address", "city") == record.city
                    and get_val(r, "organization", "address", "region") == record.region
                    and get_val(r, "organization", "address", "country") == record.country
                    and get_val(
                        r,
                        "organization",
                        "disambiguated-organization",
                        "disambiguated-organization-identifier",
                    )
                    == record.disambiguated_id
                    and get_val(
                        r, "organization", "disambiguated-organization", "disambiguation-source"
                    )
                    == record.disambiguation_source
                ):
                    record.put_code = put_code
                    record.orcid = orcid
                    record.visibility = r.get("visibility")
                    return True
                if record.put_code:
                    return
                if put_code in taken_put_codes:
                    continue
                if (
                    # pure vanilla:
                    (
                        r.get("start-date") is None
                        and r.get("end-date") is None
                        and r.get("department-name") is None
                        and r.get("role-title") is None
                    )
                    or
                    # partial match
                    (
                        (
                            # for 'edu' records department and start-date can be missing:
                            rec_type == "education"
                            or (
                                record.department
                                and r.get("start-date") == start_date
                                # NB! guarded against a present-but-None department name:
                                and (r.get("department-name", default="") or "").lower()
                                == record.department.lower()
                            )
                        )
                        and record.role
                        # NB! fixed: the closing parenthesis used to sit inside the
                        # ``default=`` argument — ``r.get("role-title",
                        # default="".lower() == record.role.lower())`` — so ANY
                        # non-empty role title matched regardless of the record's role:
                        and (r.get("role-title", default="") or "").lower() == record.role.lower()
                    )
                ):
                    record.visibility = r.get("visibility")
                    record.put_code = put_code
                    record.orcid = orcid
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the affiliation record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break

        for task_by_user in records:
            try:
                ar = task_by_user.record
                at = ar.affiliation_type.lower() if ar.affiliation_type else None
                no_orcid_call = False
                # Deletion requests are handled first and short-circuit the loop:
                if ar.delete_record and profile_record:
                    try:
                        for at in orcid_affiliation_types:
                            if ar.put_code in put_codes[at]:
                                getattr(api, f"delete_{at}v3")(user.orcid, ar.put_code)
                                app.logger.info(
                                    f"ORCID record of {user} with put-code {ar.put_code} was deleted."
                                )
                                break
                        else:
                            ar.add_status_line(
                                f"There is no record with the given put-code {ar.put_code} in the user {user} profile."
                            )
                    except Exception as ex:
                        ar.add_status_line(f"Exception occured processing the record: {ex}.")
                    ar.processed_at = datetime.utcnow()
                    ar.save()
                    continue
                # Map the raw affiliation type code onto the Affiliation flag:
                if at in EMP_CODES:
                    affiliation = Affiliation.EMP
                elif at in DST_CODES:
                    affiliation = Affiliation.DST
                elif at in MEM_CODES:
                    affiliation = Affiliation.MEM
                elif at in SER_CODES:
                    affiliation = Affiliation.SER
                elif at in QUA_CODES:
                    affiliation = Affiliation.QUA
                elif at in INV_POS_CODES:
                    affiliation = Affiliation.POS
                elif at in EDU_CODES:
                    affiliation = Affiliation.EDU
                else:
                    logger.info(f"For {user} not able to determine affiliaton type with {org}")
                    # NB! fixed: the implicit string-literal concatenation made
                    # ``", ".join(...)`` use the WHOLE message as the separator,
                    # garbling the status line:
                    ar.add_status_line(
                        f"Unsupported affiliation type '{at}' allowed values are: "
                        + ", ".join(AFFILIATION_TYPES)
                    )
                    ar.save()
                    continue
                no_orcid_call = match_put_code(affiliations.get(str(affiliation).lower()), ar)
                if no_orcid_call:
                    ar.add_status_line(f"{str(affiliation)} record unchanged.")
                else:
                    put_code, orcid, created, visibility = api.create_or_update_affiliation(
                        affiliation=affiliation, **ar.__data__
                    )
                    if created:
                        ar.add_status_line(f"{str(affiliation)} record was created.")
                    else:
                        ar.add_status_line(f"{str(affiliation)} record was updated.")
                    ar.orcid = orcid
                    ar.put_code = put_code
                    if ar.visibility != visibility:
                        ar.visibility = visibility
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                ar.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                # Mark the record as processed even on failure:
                ar.processed_at = datetime.utcnow()
                ar.save()
    else:
        # No profile access — re-invite each researcher asking for permissions:
        for task_by_user in records:
            user = User.get(email=task_by_user.record.email, organisation=task_by_user.org)
            user_org = UserOrg.get(user=user, org=task_by_user.org)
            token = new_invitation_token()
            with app.app_context():
                invitation_url = flask.url_for(
                    "orcid_login",
                    invitation_token=token,
                    _external=True,
                    _scheme="http" if app.debug else "https",
                )
            send_email(
                "email/researcher_reinvitation.html",
                recipient=(user.organisation.name, user.email),
                reply_to=f"{task_by_user.created_by.name} <{task_by_user.created_by.email}>",
                invitation_url=invitation_url,
                org_name=user.organisation.name,
                org=org,
                user=user,
            )
            UserInvitation.create(
                invitee_id=user.id,
                inviter_id=task_by_user.created_by.id,
                org=org,
                email=user.email,
                first_name=user.first_name,
                last_name=user.last_name,
                orcid=user.orcid,
                organisation=org.name,
                city=org.city,
                region=org.region,
                country=org.country,
                start_date=task_by_user.record.start_date,
                end_date=task_by_user.record.end_date,
                affiliations=user_org.affiliations,
                disambiguated_id=org.disambiguated_id,
                disambiguation_source=org.disambiguation_source,
                token=token,
            )
            status = "Exception occured while accessing user's profile. Hence, The invitation resent at "
            status += datetime.utcnow().isoformat(timespec="seconds")
            AffiliationRecord.update(status=AffiliationRecord.status + "\n" + status).where(
                AffiliationRecord.status.is_null(False), AffiliationRecord.email == user.email
            ).execute()
            AffiliationRecord.update(status=status).where(
                AffiliationRecord.status.is_null(), AffiliationRecord.email == user.email
            ).execute()
    return
@rq.job(timeout=300)
def process_work_records(max_rows=20, record_id=None):
    """Process uploaded work records.

    Args:
        max_rows (int): The maximum number of record rows fetched in one go.
        record_id (int): If given, only the work record with this ID is processed.
    """
    set_server_name()
    task_ids = set()
    work_ids = set()
    """This query is to retrieve Tasks associated with work records, which are not processed but are active"""
    # A row is eligible when the invitee already has an ORCID access token,
    # OR no invitation has been sent yet (status empty or without "sent"):
    tasks = (
        Task.select(
            Task,
            WorkRecord,
            WorkInvitee,
            User,
            UserInvitation.id.alias("invitation_id"),
            OrcidToken,
        )
        .where(
            WorkRecord.processed_at.is_null(),
            WorkInvitee.processed_at.is_null(),
            WorkRecord.is_active,
            (
                OrcidToken.id.is_null(False)
                | (
                    (WorkInvitee.status.is_null())
                    | (WorkInvitee.status.contains("sent").__invert__())
                )
            ),
        )
        .join(WorkRecord, on=(Task.id == WorkRecord.task_id), attr="record")
        .join(WorkInvitee, on=(WorkRecord.id == WorkInvitee.record_id), attr="invitee")
        # The user may be matched either by email or by (ORCID iD, organisation):
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == WorkInvitee.email)
                | ((User.orcid == WorkInvitee.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=((UserInvitation.email == WorkInvitee.email) & (UserInvitation.task_id == Task.id)),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
    )
    if record_id:
        tasks = tasks.where(WorkRecord.id == record_id)
    # Rows must be ordered by the grouping key: itertools.groupby only groups adjacent rows.
    tasks = tasks.order_by(Task.id, Task.org_id, WorkRecord.id, User.id).limit(max_rows)
    tasks = list(tasks)
    # Group key: (task, org, record, invitee's user); a user stub with a falsy id maps to None.
    for (task_id, org_id, record_id, user), tasks_by_user in groupby(
        tasks,
        lambda t: (
            t.id,
            t.org_id,
            t.record.id,
            (lambda r: r.user if r.user.id else None)(t.record.invitee),
        ),
    ):
        # If we have the token associated to the user then update the work record,
        # otherwise send him an invite
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            )
            .exists()
        ): # noqa: E127, E129
            # One invitation per distinct (inviter, org, email, first name, last name):
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.invitee.email,
                    t.record.invitee.first_name,
                    t.record.invitee.last_name,
                ),
            ): # noqa: E501
                email = k[2]
                try:
                    send_user_invitation(*k, task_id=task_id)
                except Exception as ex:
                    # Mark only this invitee as failed; other invitees keep being processed.
                    (
                        WorkInvitee.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            WorkInvitee.email == email,
                            WorkInvitee.record_id == record_id,
                            WorkInvitee.processed_at.is_null(),
                        )
                    ).execute()
        else:
            create_or_update_work(user, org_id, tasks_by_user)
        task_ids.add(task_id)
        work_ids.add(record_id)
    for record in WorkRecord.select().where(WorkRecord.id << work_ids):
        # The Work record is processed for all invitees
        if not (
            WorkInvitee.select()
            .where(WorkInvitee.record_id == record.id, WorkInvitee.processed_at.is_null())
            .exists()
        ):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("Work record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (
            WorkRecord.select()
            .where(WorkRecord.task_id == task.id, WorkRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # ``**`` is peewee's case-insensitive LIKE — records whose status mentions an error:
            error_count = (
                WorkRecord.select()
                .where(WorkRecord.task_id == task.id, WorkRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            # Notify the task uploader that the batch has been completed:
            with app.app_context():
                export_url = flask.url_for(
                    "workrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                send_email(
                    "email/work_task_completed.html",
                    subject="Work Process Update",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    task_name="Work",
                    filename=task.filename,
                )
@rq.job(timeout=300)
def process_peer_review_records(max_rows=20, record_id=None):
    """Process uploaded peer review records.

    Args:
        max_rows (int): The maximum number of record rows fetched in one go.
        record_id (int): If given, only the peer review record with this ID is processed.
    """
    set_server_name()
    task_ids = set()
    peer_review_ids = set()
    """This query is to retrieve Tasks associated with peer review records, which are not processed but are active"""
    # A row is eligible when the invitee already has an ORCID access token,
    # OR no invitation has been sent yet (status empty or without "sent"):
    tasks = (
        Task.select(
            Task,
            PeerReviewRecord,
            PeerReviewInvitee,
            User,
            UserInvitation.id.alias("invitation_id"),
            OrcidToken,
        )
        .where(
            PeerReviewRecord.processed_at.is_null(),
            PeerReviewInvitee.processed_at.is_null(),
            PeerReviewRecord.is_active,
            (
                OrcidToken.id.is_null(False)
                | (
                    (PeerReviewInvitee.status.is_null())
                    | (PeerReviewInvitee.status.contains("sent").__invert__())
                )
            ),
        )
        .join(PeerReviewRecord, on=(Task.id == PeerReviewRecord.task_id), attr="record")
        .join(
            PeerReviewInvitee,
            on=(PeerReviewRecord.id == PeerReviewInvitee.record_id),
            attr="invitee",
        )
        # The user may be matched either by email or by (ORCID iD, organisation):
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == PeerReviewInvitee.email)
                | ((User.orcid == PeerReviewInvitee.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (UserInvitation.email == PeerReviewInvitee.email)
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
    )
    if record_id:
        tasks = tasks.where(PeerReviewRecord.id == record_id)
    # Rows must be ordered by the grouping key: itertools.groupby only groups adjacent rows.
    tasks = tasks.order_by(Task.id, Task.org_id, PeerReviewRecord.id, User.id).limit(max_rows)
    tasks = list(tasks)
    # Group key: (task, org, record, invitee's user); a user stub with a falsy id maps to None.
    for (task_id, org_id, record_id, user), tasks_by_user in groupby(
        tasks,
        lambda t: (
            t.id,
            t.org_id,
            t.record.id,
            (lambda r: r.user if r.user.id else None)(t.record.invitee),
        ),
    ):
        """If we have the token associated to the user then update the peer record, otherwise send him an invite"""
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            )
            .exists()
        ): # noqa: E127, E129
            # One invitation per distinct (inviter, org, email, first name, last name):
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.invitee.email,
                    t.record.invitee.first_name,
                    t.record.invitee.last_name,
                ),
            ): # noqa: E501
                email = k[2]
                try:
                    send_user_invitation(*k, task_id=task_id)
                except Exception as ex:
                    # Mark only this invitee as failed; other invitees keep being processed.
                    (
                        PeerReviewInvitee.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            PeerReviewInvitee.email == email,
                            PeerReviewInvitee.record_id == record_id,
                            PeerReviewInvitee.processed_at.is_null(),
                        )
                    ).execute()
        else:
            create_or_update_peer_review(user, org_id, tasks_by_user)
        task_ids.add(task_id)
        peer_review_ids.add(record_id)
    for record in PeerReviewRecord.select().where(PeerReviewRecord.id << peer_review_ids):
        # The Peer Review record is processed for all invitees
        if not (
            PeerReviewInvitee.select()
            .where(
                PeerReviewInvitee.record_id == record.id, PeerReviewInvitee.processed_at.is_null()
            )
            .exists()
        ):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("Peer Review record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (
            PeerReviewRecord.select()
            .where(PeerReviewRecord.task_id == task.id, PeerReviewRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # ``**`` is peewee's case-insensitive LIKE — records whose status mentions an error:
            error_count = (
                PeerReviewRecord.select()
                .where(PeerReviewRecord.task_id == task.id, PeerReviewRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            # Notify the task uploader that the batch has been completed:
            with app.app_context():
                export_url = flask.url_for(
                    "peerreviewrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                send_email(
                    "email/work_task_completed.html",
                    subject="Peer Review Process Update",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    task_name="Peer Review",
                    filename=task.filename,
                )
@rq.job(timeout=300)
def process_funding_records(max_rows=20, record_id=None):
    """Process uploaded funding records.

    Args:
        max_rows (int): The maximum number of record rows fetched in one go.
        record_id (int): If given, only the funding record with this ID is processed.
    """
    set_server_name()
    task_ids = set()
    funding_ids = set()
    """This query is to retrieve Tasks associated with funding records, which are not processed but are active"""
    # A row is eligible when the invitee already has an ORCID access token,
    # OR no invitation has been sent yet (status empty or without "sent"):
    tasks = (
        Task.select(
            Task,
            FundingRecord,
            FundingInvitee,
            User,
            UserInvitation.id.alias("invitation_id"),
            OrcidToken,
        )
        .where(
            FundingRecord.processed_at.is_null(),
            FundingInvitee.processed_at.is_null(),
            FundingRecord.is_active,
            (
                OrcidToken.id.is_null(False)
                | (
                    (FundingInvitee.status.is_null())
                    | (FundingInvitee.status.contains("sent").__invert__())
                )
            ),
        )
        .join(FundingRecord, on=(Task.id == FundingRecord.task_id), attr="record")
        .join(FundingInvitee, on=(FundingRecord.id == FundingInvitee.record_id), attr="invitee")
        # The user may be matched either by email or by (ORCID iD, organisation):
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == FundingInvitee.email)
                | ((User.orcid == FundingInvitee.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (UserInvitation.email == FundingInvitee.email)
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
        .limit(max_rows)
    )
    if record_id:
        tasks = tasks.where(FundingRecord.id == record_id)
    # itertools.groupby only groups *adjacent* rows, so order the rows by the grouping
    # key (as process_work_records does); without this, groups could fragment:
    tasks = tasks.order_by(Task.id, Task.org_id, FundingRecord.id, User.id)
    # Group key: (task, org, record, invitee's user); a user stub with a falsy id maps to None.
    for (task_id, org_id, record_id, user), tasks_by_user in groupby(
        tasks,
        lambda t: (
            t.id,
            t.org_id,
            t.record.id,
            (lambda r: r.user if r.user.id else None)(t.record.invitee),
        ),
    ):
        """If we have the token associated to the user then update the funding record, otherwise send him an invite"""
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            )
            .exists()
        ): # noqa: E127, E129
            # One invitation per distinct (inviter, org, email, first name, last name):
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.invitee.email,
                    t.record.invitee.first_name,
                    t.record.invitee.last_name,
                ),
            ): # noqa: E501
                email = k[2]
                try:
                    send_user_invitation(*k, task_id=task_id)
                except Exception as ex:
                    # Mark only this invitee as failed; other invitees keep being processed.
                    (
                        FundingInvitee.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            FundingInvitee.email == email,
                            FundingInvitee.record_id == record_id,
                            FundingInvitee.processed_at.is_null(),
                        )
                    ).execute()
        else:
            create_or_update_funding(user, org_id, tasks_by_user)
        task_ids.add(task_id)
        funding_ids.add(record_id)
    for record in FundingRecord.select().where(FundingRecord.id << funding_ids):
        # The funding record is processed for all invitees
        if not (
            FundingInvitee.select()
            .where(FundingInvitee.record_id == record.id, FundingInvitee.processed_at.is_null())
            .exists()
        ):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("Funding record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (
            FundingRecord.select()
            .where(FundingRecord.task_id == task.id, FundingRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # ``**`` is peewee's case-insensitive LIKE — records whose status mentions an error:
            error_count = (
                FundingRecord.select()
                .where(FundingRecord.task_id == task.id, FundingRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            # Notify the task uploader that the batch has been completed:
            with app.app_context():
                export_url = flask.url_for(
                    "fundingrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                send_email(
                    "email/funding_task_completed.html",
                    subject="Funding Process Update",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    filename=task.filename,
                )
@rq.job(timeout=300)
def process_affiliation_records(max_rows=20, record_id=None):
    """Process uploaded affiliation records.

    Args:
        max_rows (int): The maximum number of record rows fetched in one go.
        record_id (int or list): If given, only the affiliation record(s) with
            this ID (or list of IDs) are processed.
    """
    set_server_name()
    # TODO: optimize removing redundant fields
    # TODO: perhaps it should be broken into 2 queries
    task_ids = set()
    # A row is eligible when the user has an ORCID iD and an access token,
    # OR the user/token is missing and no invitation has been sent yet:
    tasks = (
        Task.select(
            Task, AffiliationRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken
        )
        .where(
            AffiliationRecord.processed_at.is_null(),
            AffiliationRecord.is_active,
            (
                (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False))
                | (
                    (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null())
                    & UserInvitation.id.is_null()
                    & (
                        AffiliationRecord.status.is_null()
                        | AffiliationRecord.status.contains("sent").__invert__()
                    )
                )
            ),
        )
        .join(AffiliationRecord, on=(Task.id == AffiliationRecord.task_id), attr="record")
        # The user may be matched either by email or by (ORCID iD, organisation):
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == AffiliationRecord.email)
                | ((User.orcid == AffiliationRecord.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (UserInvitation.email == AffiliationRecord.email)
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
        .limit(max_rows)
    )
    if record_id:
        if isinstance(record_id, list):
            tasks = tasks.where(AffiliationRecord.id.in_(record_id))
        else:
            tasks = tasks.where(AffiliationRecord.id == record_id)
    # Group key: (task, org, record's user); a user stub with a falsy id maps to None.
    for (task_id, org_id, user), tasks_by_user in groupby(
        tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record))
    ):
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            )
            .exists()
        ): # noqa: E127, E129
            # maps invitation attributes to affiliation type set:
            # - the user who uploaded the task;
            # - the user organisation;
            # - the invitee email;
            # - the invitee first_name;
            # - the invitee last_name
            invitation_dict = {
                k: set(t.record.affiliation_type.lower() for t in tasks)
                for k, tasks in groupby(
                    tasks_by_user,
                    lambda t: (
                        t.created_by,
                        t.org,
                        t.record.email,
                        t.record.first_name,
                        t.record.last_name,
                    ), # noqa: E501
                ) # noqa: E501
            }
            for invitation, affiliations in invitation_dict.items():
                email = invitation[2]
                try:
                    send_user_invitation(
                        *invitation, affiliation_types=affiliations, task_id=task_id
                    )
                except Exception as ex:
                    # Mark only this invitee's records as failed; keep processing the rest.
                    (
                        AffiliationRecord.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            AffiliationRecord.task_id == task_id,
                            AffiliationRecord.email == email,
                            AffiliationRecord.processed_at.is_null(),
                        )
                    ).execute()
        else: # user exists and we have tokens
            create_or_update_affiliations(user, org_id, tasks_by_user)
        task_ids.add(task_id)
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (all records are processed):
        if not (
            AffiliationRecord.select()
            .where(AffiliationRecord.task_id == task.id, AffiliationRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # ``**`` is peewee's case-insensitive LIKE — records whose status mentions an error:
            error_count = (
                AffiliationRecord.select()
                .where(AffiliationRecord.task_id == task.id, AffiliationRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            orcid_rec_count = (
                task.affiliation_records.select(AffiliationRecord.orcid).distinct().count()
            )
            # Skip completion notifications for integration-test tasks:
            if task.filename and "INTEGRATION" not in task.filename:
                with app.app_context():
                    export_url = flask.url_for(
                        "affiliationrecord.export",
                        export_type="csv",
                        _scheme="http" if EXTERNAL_SP else "https",
                        task_id=task.id,
                        _external=True,
                    )
                    try:
                        send_email(
                            "email/task_completed.html",
                            subject="Affiliation Process Update",
                            recipient=(task.created_by.name, task.created_by.email),
                            error_count=error_count,
                            row_count=row_count,
                            orcid_rec_count=orcid_rec_count,
                            export_url=export_url,
                            filename=task.filename,
                        )
                    except Exception:
                        # Notification failure must not abort batch processing.
                        logger.exception(
                            "Failed to send batch process completion notification message."
                        )
@rq.job(timeout=300)
def process_property_records(max_rows=20, record_id=None):
    """Process uploaded property records.

    Args:
        max_rows (int): The maximum number of record rows fetched in one go.
        record_id (int or list): If given, only the property record(s) with
            this ID (or list of IDs) are processed.
    """
    set_server_name()
    # TODO: optimize removing redundant fields
    # TODO: perhaps it should be broken into 2 queries
    task_ids = set()
    # A row is eligible when the user has an ORCID iD and an access token,
    # OR the user/token is missing and no invitation has been sent yet:
    tasks = (
        Task.select(
            Task, PropertyRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken
        )
        .where(
            PropertyRecord.processed_at.is_null(),
            PropertyRecord.is_active,
            (
                (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False))
                | (
                    (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null())
                    & UserInvitation.id.is_null()
                    & (
                        PropertyRecord.status.is_null()
                        | PropertyRecord.status.contains("sent").__invert__()
                    )
                )
            ),
        )
        .join(PropertyRecord, on=(Task.id == PropertyRecord.task_id), attr="record")
        # The user may be matched either by email or by (ORCID iD, organisation):
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == PropertyRecord.email)
                | ((User.orcid == PropertyRecord.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (
                    (UserInvitation.email == PropertyRecord.email)
                    | (UserInvitation.email == User.email)
                )
                & (UserInvitation.task_id == Task.id)
            ),
        )
        # Properties require the "/person/update" scope (not "/activities/update"):
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/person/update"))
            ),
        )
    )
    if max_rows:
        tasks = tasks.limit(max_rows)
    if record_id:
        if isinstance(record_id, list):
            tasks = tasks.where(PropertyRecord.id.in_(record_id))
        else:
            tasks = tasks.where(PropertyRecord.id == record_id)
    # Group key: (task, org, record's user); a user stub with a falsy id maps to None.
    for (task_id, org_id, user), tasks_by_user in groupby(
        tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record))
    ):
        if (
            not user
            or not user.orcid
            or not OrcidToken.select()
            .where(
                OrcidToken.user_id == user.id,
                OrcidToken.org_id == org_id,
                OrcidToken.scopes.contains("/person/update"),
            )
            .exists()
        ): # noqa: E127, E129
            # One invitation per distinct (inviter, org, email, first name, last name, user):
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.email,
                    t.record.first_name,
                    t.record.last_name,
                    user,
                ),
            ): # noqa: E501
                try:
                    send_user_invitation(*k, task_id=task_id)
                    status = "The invitation sent at " + datetime.utcnow().isoformat(
                        timespec="seconds"
                    )
                    for r in tasks:
                        r.record.add_status_line(status)
                        r.record.save()
                except Exception as ex:
                    for r in tasks:
                        r.record.add_status_line(f"Failed to send an invitation: {ex}.")
                        r.record.save()
        else:
            create_or_update_properties(user, org_id, tasks_by_user)
        task_ids.add(task_id)
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (all records are processed):
        if not (
            PropertyRecord.select()
            .where(PropertyRecord.task_id == task.id, PropertyRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # ``**`` is peewee's case-insensitive LIKE — records whose status mentions an error:
            error_count = (
                PropertyRecord.select()
                .where(PropertyRecord.task_id == task.id, PropertyRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            # Notify the task uploader that the batch has been completed:
            with app.app_context():
                export_url = flask.url_for(
                    "propertyrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                try:
                    send_email(
                        "email/task_completed.html",
                        subject="Researcher Property Record Process Update",
                        recipient=(task.created_by.name, task.created_by.email),
                        error_count=error_count,
                        row_count=row_count,
                        export_url=export_url,
                        task_name="Researcher Property",
                        filename=task.filename,
                    )
                except Exception:
                    # Notification failure must not abort batch processing.
                    logger.exception(
                        "Failed to send batch process completion notification message."
                    )
@rq.job(timeout=300)
def process_other_id_records(max_rows=20, record_id=None):
    """Process uploaded Other ID records.

    Args:
        max_rows (int): The maximum number of record rows fetched in one go.
        record_id (int): If given, only the Other ID record with this ID is processed.
    """
    set_server_name()
    # TODO: optimize
    task_ids = set()
    # A row is eligible when the user has an ORCID iD and an access token,
    # OR the user/token is missing and no invitation has been sent yet:
    tasks = (
        Task.select(
            Task, OtherIdRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken
        )
        .where(
            OtherIdRecord.processed_at.is_null(),
            OtherIdRecord.is_active,
            (
                (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False))
                | (
                    (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null())
                    & UserInvitation.id.is_null()
                    & (
                        OtherIdRecord.status.is_null()
                        | OtherIdRecord.status.contains("sent").__invert__()
                    )
                )
            ),
        )
        .join(OtherIdRecord, on=(Task.id == OtherIdRecord.task_id), attr="record")
        # The user may be matched either by email or by (ORCID iD, organisation):
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == OtherIdRecord.email)
                | ((User.orcid == OtherIdRecord.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (UserInvitation.email == OtherIdRecord.email) & (UserInvitation.task_id == Task.id)
            ),
        )
        # Other IDs require the "/person/update" scope (not "/activities/update"):
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/person/update"))
            ),
        )
        .limit(max_rows)
    )
    if record_id:
        tasks = tasks.where(OtherIdRecord.id == record_id)
    # Group key: (task, org, record's user); a user stub with a falsy id maps to None.
    for (task_id, org_id, user), tasks_by_user in groupby(
        tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record))
    ):
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/person/update"))
            )
            .exists()
        ): # noqa: E127, E129
            # One invitation per distinct (inviter, org, email, first name, last name):
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.email,
                    t.record.first_name,
                    t.record.last_name,
                ),
            ): # noqa: E501
                try:
                    email = k[2]
                    send_user_invitation(
                        *k, task_id=task_id, invitation_template="email/property_invitation.html"
                    )
                    status = "The invitation sent at " + datetime.utcnow().isoformat(
                        timespec="seconds"
                    )
                    # Append the status line where a status already exists...
                    (
                        OtherIdRecord.update(status=OtherIdRecord.status + "\n" + status)
                        .where(
                            OtherIdRecord.status.is_null(False),
                            OtherIdRecord.task_id == task_id,
                            OtherIdRecord.email == email,
                        )
                        .execute()
                    )
                    # ...and set it as the initial status otherwise:
                    (
                        OtherIdRecord.update(status=status)
                        .where(
                            OtherIdRecord.status.is_null(),
                            OtherIdRecord.task_id == task_id,
                            OtherIdRecord.email == email,
                        )
                        .execute()
                    )
                except Exception as ex:
                    # Mark only this invitee's records as failed; keep processing the rest.
                    (
                        OtherIdRecord.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            OtherIdRecord.task_id == task_id,
                            OtherIdRecord.email == email,
                            OtherIdRecord.processed_at.is_null(),
                        )
                    ).execute()
        else:
            create_or_update_other_id(user, org_id, tasks_by_user)
        task_ids.add(task_id)
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (all records are processed):
        if not (
            OtherIdRecord.select()
            .where(OtherIdRecord.task_id == task.id, OtherIdRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # ``**`` is peewee's case-insensitive LIKE — records whose status mentions an error:
            error_count = (
                OtherIdRecord.select()
                .where(OtherIdRecord.task_id == task.id, OtherIdRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            # Notify the task uploader that the batch has been completed:
            with app.app_context():
                export_url = flask.url_for(
                    "otheridrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                try:
                    send_email(
                        "email/task_completed.html",
                        subject="Other ID Record Process Update",
                        recipient=(task.created_by.name, task.created_by.email),
                        error_count=error_count,
                        row_count=row_count,
                        export_url=export_url,
                        task_name="Other ID",
                        filename=task.filename,
                    )
                except Exception:
                    # Notification failure must not abort batch processing.
                    logger.exception(
                        "Failed to send batch process completion notification message."
                    )
@rq.job(timeout=300)
def process_resource_records(max_rows=20, record_id=None):
    """Process uploaded resource records.

    Args:
        max_rows (int): The maximum number of record rows fetched in one go.
        record_id (int or list): If given, only the resource record(s) with
            this ID (or list of IDs) are processed.
    """
    set_server_name()
    # TODO: optimize removing redundant fields
    # TODO: perhaps it should be broken into 2 queries
    task_ids = set()
    # A row is eligible when the user has an ORCID iD and an access token,
    # OR the user/token is missing and no invitation has been sent yet:
    tasks = (
        Task.select(
            Task, ResourceRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken
        )
        .where(
            ResourceRecord.processed_at.is_null(),
            ResourceRecord.is_active,
            (
                (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False))
                | (
                    (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null())
                    & UserInvitation.id.is_null()
                    & (
                        ResourceRecord.status.is_null()
                        | ResourceRecord.status.contains("sent").__invert__()
                    )
                )
            ),
        )
        .join(ResourceRecord, on=(Task.id == ResourceRecord.task_id), attr="record")
        # The user may be matched either by email or by (ORCID iD, organisation):
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == ResourceRecord.email)
                | ((User.orcid == ResourceRecord.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (
                    (UserInvitation.email == ResourceRecord.email)
                    | (UserInvitation.email == User.email)
                )
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
    )
    if max_rows:
        tasks = tasks.limit(max_rows)
    if record_id:
        if isinstance(record_id, list):
            tasks = tasks.where(ResourceRecord.id.in_(record_id))
        else:
            tasks = tasks.where(ResourceRecord.id == record_id)
    # Group key: (task, org, record's user); a user stub with a falsy id maps to None.
    for (task_id, org_id, user), tasks_by_user in groupby(
        tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record))
    ):
        if (
            not user
            or not user.orcid
            or not OrcidToken.select()
            .where(
                OrcidToken.user_id == user.id,
                OrcidToken.org_id == org_id,
                OrcidToken.scopes.contains("/activities/update"),
            )
            .exists()
        ): # noqa: E127, E129
            # One invitation per distinct (inviter, org, email, first name, last name, user):
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.email,
                    t.record.first_name,
                    t.record.last_name,
                    user,
                ),
            ): # noqa: E501
                try:
                    send_user_invitation(*k, task_id=task_id)
                    status = "The invitation sent at " + datetime.utcnow().isoformat(
                        timespec="seconds"
                    )
                    for r in tasks:
                        r.record.add_status_line(status)
                        r.record.save()
                except Exception as ex:
                    for r in tasks:
                        r.record.add_status_line(f"Failed to send an invitation: {ex}.")
                        r.record.save()
        else:
            create_or_update_resources(user, org_id, tasks_by_user)
        task_ids.add(task_id)
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (all records are processed):
        rm = task.record_model
        if not (rm.select().where(rm.task_id == task.id, rm.processed_at.is_null()).exists()):
            task.completed_at = datetime.utcnow()
            task.save()
            # ``**`` is peewee's case-insensitive LIKE — records whose status mentions an error:
            error_count = rm.select().where(rm.task_id == task.id, rm.status ** "%error%").count()
            row_count = task.record_count
            # Notify the task uploader that the batch has been completed:
            with app.app_context():
                export_url = flask.url_for(
                    "resourcerecord.export",
                    export_type="json" if task.is_raw else "csv",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                try:
                    send_email(
                        "email/task_completed.html",
                        subject="Research Resource Record Process Update",
                        recipient=(task.created_by.name, task.created_by.email),
                        error_count=error_count,
                        row_count=row_count,
                        export_url=export_url,
                        task_name="Research Resource",
                        filename=task.filename,
                    )
                except Exception:
                    # Notification failure must not abort batch processing.
                    logger.exception(
                        "Failed to send batch process completion notification message."
                    )
@rq.job(timeout=300)
def process_message_records(max_rows=20, record_id=None):
    """Process uploaded ORCID message records.

    Args:
        max_rows (int): The maximum number of record rows fetched in one go.
        record_id (int, list, or tuple): If given, only the message record(s)
            with this ID (or IDs) are processed.
    """
    # The many-to-many through model linking MessageRecord to Invitee:
    RecordInvitee = MessageRecord.invitees.get_through_model() # noqa: N806
    set_server_name()
    record_ids = set()
    task_ids = set()
    tasks = (
        Task.select(Task, MessageRecord, RecordInvitee, Invitee, User, OrcidToken)
        .where(Task.is_raw, Invitee.processed_at.is_null(), MessageRecord.is_active)
        .join(MessageRecord, attr="record")
        .join(RecordInvitee, attr="ri")
        .join(Invitee)
        # The user may be matched either by email or by ORCID iD:
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=((User.email == Invitee.email) | ((User.orcid == Invitee.orcid))),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Task.org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
            attr="token",
        )
    )
    if record_id:
        if isinstance(record_id, (list, tuple)):
            tasks = tasks.where(MessageRecord.id << record_id)
        else:
            tasks = tasks.where(MessageRecord.id == record_id)
    if max_rows:
        tasks = tasks.limit(max_rows)
    # Send an invitation to invitees without an access token:
    for task_id, records in groupby(
        tasks.where(OrcidToken.id.is_null()).order_by(
            Task.id, Invitee.email, Invitee.first_name, Invitee.last_name
        ),
        lambda t: t.id,
    ):
        task_ids.add(task_id)
        # De-duplicate invitees, falling back to the matched user's details
        # where the invitee row lacks them:
        invitees = set(
            (
                t.created_by,
                t.org,
                t.record.ri.invitee.email or t.record.ri.invitee.user.email,
                t.record.ri.invitee.first_name or t.record.ri.invitee.user.first_name,
                t.record.ri.invitee.last_name or t.record.ri.invitee.user.last_name,
            )
            for t in records
        )
        for invitee in invitees: # noqa: E501
            send_user_invitation(*invitee, task_id=task_id)
    # Create or update the resource record
    for (task_id, task_type, org_id, user), records in groupby(
        tasks.where(OrcidToken.id.is_null(False)).order_by(Task.id, User.id, MessageRecord.id),
        lambda t: (t.id, t.task_type, t.org_id, t.record.ri.invitee.user),
    ):
        # TODO: in the future - implement for other types:
        task_ids.add(task_id)
        records = list(records)
        create_or_update_record_from_messages(records)
        record_ids.update(r.record.id for r in records)
    for record in MessageRecord.select().where(MessageRecord.id << record_ids):
        # The record is processed for all invitees
        if not (
            RecordInvitee.select()
            .join(Invitee)
            .where(RecordInvitee.messagerecord_id == record.id, Invitee.processed_at.is_null())
            .exists()
        ):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (
            MessageRecord.select()
            .where(MessageRecord.task_id == task.id, MessageRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # ``**`` is peewee's case-insensitive LIKE — records whose status mentions an error:
            error_count = (
                MessageRecord.select()
                .where(MessageRecord.task_id == task.id, MessageRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            # Notify the task uploader that the batch has been completed:
            with app.app_context():
                export_url = flask.url_for(
                    "messagerecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                send_email(
                    "email/task_completed.html",
                    subject=f"Batch Process Update: {task.filename}",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    filename=task.filename,
                )
@rq.job(timeout=300)
def process_tasks(max_rows=20):
    """Handle batch task expiration.

    Sends information messages about the upcoming removal of processed/uploaded
    tasks — the expiry is whichever is later: created_at + 4 weeks or
    updated_at + 2 weeks — and removes tasks whose expiry date has passed.

    Args:
        max_rows (int): The maximum number of rows that will get processed in one go.
    """
    # Purge the tasks that have already expired:
    Task.delete().where((Task.expires_at < datetime.utcnow())).execute()

    # Stamp an expiry date onto tasks that do not have one yet:
    pending = Task.select().where(Task.expires_at.is_null())
    if max_rows and max_rows > 0:
        pending = pending.limit(max_rows)
    for task in pending:
        task.expires_at = max(
            task.created_at + timedelta(weeks=4),
            task.updated_at + timedelta(weeks=2),
        )
        task.save()

    # Warn the uploader about tasks that will expire within the next week:
    expiring = Task.select().where(
        Task.expires_at.is_null(False),
        Task.expiry_email_sent_at.is_null(),
        Task.expires_at < (datetime.now() + timedelta(weeks=1)),
    )
    if max_rows and max_rows > 0:
        expiring = expiring.limit(max_rows)
    for task in expiring:
        if not task.task_type or task.records is None:
            app.logger.error(f'Unknown task "{task}" (ID: {task.id}) task type.')
            continue
        # Integration-test tasks never trigger expiry notifications:
        if task.filename and "INTEGRATION" in task.filename:
            continue
        export_model = task.record_model._meta.name + ".export"
        error_count = task.error_count
        set_server_name()
        with app.app_context():
            export_url = flask.url_for(
                export_model,
                export_type="csv",
                _scheme="http" if EXTERNAL_SP else "https",
                task_id=task.id,
                _external=True,
            )
            send_email(
                "email/task_expiration.html",
                task=task,
                subject="Batch process task is about to expire",
                recipient=(task.created_by.name, task.created_by.email),
                error_count=error_count,
                export_url=export_url,
            )
            task.expiry_email_sent_at = datetime.utcnow()
            task.save()
def get_client_credentials_token(org, scopes="/webhook"):
    """Request a client credentials grant type access token and store it.

    Any previously requested token with the given scope will be deleted.

    Args:
        org: the organisation whose ORCID client credentials are exchanged.
        scopes (str): the requested token scope (default: "/webhook").

    Returns:
        OrcidToken: the newly created token record.
    """
    # Exchange the organisation's ORCID client credentials for an access token:
    resp = requests.post(
        app.config["TOKEN_URL"],
        headers={"Accept": "application/json"},
        data=dict(
            client_id=org.orcid_client_id,
            client_secret=org.orcid_secret,
            scope=scopes,
            grant_type="client_credentials",
        ),
    )
    # Drop stale tokens for the same organisation/scope before storing the new one:
    OrcidToken.delete().where(OrcidToken.org == org, OrcidToken.scopes == scopes).execute()
    data = resp.json()
    token = OrcidToken.create(
        org=org,
        access_token=data["access_token"],
        refresh_token=data["refresh_token"],
        # ORCID may grant a different scope string than requested:
        scopes=data.get("scope") or scopes,
        expires_in=data["expires_in"],
    )
    return token
@rq.job(timeout=300)
def register_orcid_webhook(user, callback_url=None, delete=False):
    """Register or delete an ORCID webhook for the given user profile update events.

    If a URL is given, it will be used as the call-back URL; otherwise the
    Hub's own update handler is registered.

    Raises:
        ApiException: if ORCID responds with a status other than 201 or 204.
    """
    local_handler = callback_url is None
    # Don't delete the webhook if there is another organisation with enabled webhook:
    if (
        local_handler
        and delete
        and user.organisations.where(Organisation.webhook_enabled).count() > 1
    ):
        return
    # Any 'webhook' access token can be used:
    token = (
        OrcidToken.select()
        .where(OrcidToken.org == user.organisation, OrcidToken.scopes == "/webhook")
        .order_by(OrcidToken.id.desc())
        .first()
    )
    if not token:
        token = get_client_credentials_token(org=user.organisation, scopes="/webhook")
    if local_handler:
        # Use the Hub's own update handler as the call-back URL:
        set_server_name()
        with app.app_context():
            callback_url = quote(
                url_for("update_webhook", user_id=user.id, _external=True, _scheme="https"),
                safe="",
            )
    elif "/" in callback_url or ":" in callback_url:
        # The call-back URL must be fully URL-encoded to be embedded in the path:
        callback_url = quote(callback_url, safe="")
    url = f"{app.config['ORCID_API_HOST_URL']}{user.orcid}/webhook/{callback_url}"
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {token.access_token}",
        "Content-Length": "0",
    }
    # Record the outgoing API call together with its response for auditing:
    call = OrcidApiCall(method="DELETE" if delete else "PUT", url=url, query_params=headers)
    resp = requests.delete(url, headers=headers) if delete else requests.put(url, headers=headers)
    call.response = resp.text
    call.status = resp.status_code
    call.set_response_time()
    call.save()
    if local_handler:
        # 201: webhook created; 204: webhook deleted (or no change was needed):
        user.webhook_enabled = (resp.status_code in [201, 204]) and not delete
        user.save()
    if resp.status_code not in [201, 204]:
        raise ApiException(f"Failed to register or delete webhook {callback_url}: {resp.text}")
    return resp
def notify_about_update(user, event_type="UPDATED"):
    """Notify all organisation about changes of the user.

    For each of the user's organisations, invoke the organisation's webhook
    (queued, with retries) and/or send an email notification, depending on
    which of the two notification channels the organisation has enabled.
    """
    for org in user.organisations.where(
        Organisation.webhook_enabled | Organisation.email_notifications_enabled
    ):
        if org.webhook_enabled and org.webhook_url:
            # Queue the webhook invocation for asynchronous delivery:
            invoke_webhook_handler.queue(
                org.id,
                user.orcid,
                user.created_at or user.updated_at,
                user.updated_at or user.created_at,
                event_type=event_type,
            )
        if org.email_notifications_enabled:
            url = app.config["ORCID_BASE_URL"] + user.orcid
            # Send to the organisation's notification address if one is set
            # (CC-ing the technical contact), otherwise to the tech contact:
            send_email(
                f"""<p>User {user.name} (<a href="{url}" target="_blank">{user.orcid}</a>)
                {"profile was updated" if event_type == "UPDATED" else "has linked their account"} at
                {(user.updated_at or user.created_at).isoformat(timespec="minutes", sep=' ')}.</p>""",
                recipient=org.notification_email
                or (org.tech_contact.name, org.tech_contact.email),
                cc_email=(org.tech_contact.name, org.tech_contact.email)
                if org.notification_email
                else None,
                subject=f"ORCID Profile Update ({user.orcid})",
                org=org,
            )
@rq.job(timeout=300)
def invoke_webhook_handler(
    org_id=None,
    orcid=None,
    created_at=None,
    updated_at=None,
    message=None,
    event_type="UPDATED",
    attempts=5,
    *args,
    **kwargs,
):
    """Propagate 'updated' event to the organisation event handler URL.

    Retries up to ``attempts`` times (re-scheduled with a growing delay) when
    the handler is unreachable or responds with a non-2xx/3xx status.
    """
    if not message:
        url = app.config["ORCID_BASE_URL"] + orcid
        message = {"orcid": orcid, "url": url, "type": event_type}
        if event_type == "CREATED" and created_at:
            message["created-at"] = created_at.isoformat(timespec="seconds")
        if updated_at:
            message["updated-at"] = updated_at.isoformat(timespec="seconds")
        if orcid:
            user = (
                User.select().where(User.orcid == orcid).order_by(User.id.desc()).limit(1).first()
            )
            if user:
                message["email"] = user.email
                if user.eppn:
                    message["eppn"] = user.eppn
    if org_id:
        org = Organisation.get(id=org_id)
    else:
        org = User.select().where(User.orcid == orcid).first().organisation
        org_id = org.id
    url = org.webhook_url
    if org.webhook_append_orcid:
        if not url.endswith("/"):
            url += "/"
        url += orcid
    # Keep resp defined even when the request itself fails, so the retry
    # logic below can run (previously resp was unbound after an exception,
    # causing a NameError instead of scheduling a retry):
    resp = None
    try:
        app.logger.info(f"Invoking webhook: {url} with payload: {message}")
        if org.webhook_apikey:
            resp = requests.post(url, json=message, headers=dict(apikey=org.webhook_apikey))
        else:
            resp = requests.post(url, json=message)
    except Exception:
        # Out of attempts: let the exception propagate to the task queue.
        if attempts == 1:
            raise
    # Retry when the request failed or the handler returned a non-2xx/3xx status:
    if not resp or resp.status_code // 200 != 1:
        if attempts > 1:
            invoke_webhook_handler.schedule(
                timedelta(minutes=5 * (6 - attempts) if attempts < 6 else 5),
                org_id=org_id,
                message=message,
                attempts=attempts - 1,
            )
        else:
            raise Exception(
                "Failed to propagate the event. Status code: "
                f"{resp.status_code if resp else 'N/A'}"
            )
    return resp
@rq.job(timeout=300)
def enable_org_webhook(org):
    """Enable Organisation Webhook."""
    # Flag the organisation first so subsequent look-ups see it as enabled:
    org.webhook_enabled = True
    org.save()
    # Queue webhook registration for every user of the organisation that has
    # an ORCID iD but no webhook registered yet:
    candidates = org.users.where(
        User.webhook_enabled.NOT(), User.orcid.is_null(False) | (User.orcid != "")
    )
    for user in candidates:
        if not user.orcid.strip():
            continue
        register_orcid_webhook.queue(user)
@rq.job(timeout=300)
def disable_org_webhook(org):
    """Disable Organisation Webhook."""
    # Flag the organisation first so subsequent look-ups see it as disabled:
    org.webhook_enabled = False
    org.save()
    # Queue webhook removal for every user that currently has one registered:
    candidates = org.users.where(
        User.webhook_enabled, User.orcid.is_null(False) | (User.orcid != "")
    )
    for user in candidates:
        if not user.orcid.strip():
            continue
        register_orcid_webhook.queue(user, delete=True)
def process_records(n):
    """Process first n records and run other batch tasks."""
    # Run every record-type processor over the first n pending records:
    for processor in (
        process_affiliation_records,
        process_funding_records,
        process_work_records,
        process_peer_review_records,
        process_property_records,
        process_other_id_records,
    ):
        processor(n)
    # process_tasks(n)
@rq.job(timeout=300)
def send_orcid_update_summary(org_id=None):
    """Send organisation researcher ORCID profile update summary report.

    Without ``org_id``, fan out: queue one job per organisation that has both
    webhooks and email notifications enabled and had user profiles updated
    during the previous calendar month. With ``org_id``, email that
    organisation a summary of last month's profile updates (to the
    notification address, or the technical contact when none is set).
    """
    # Previous calendar month boundaries: [previous_first, first)
    first = date.today().replace(day=1)
    previous_last = first - timedelta(days=1)
    previous_first = previous_last.replace(day=1)
    if org_id is None:
        for o in (
            Organisation.select(Organisation.id)
            .distinct()
            .join(UserOrg, on=UserOrg.org_id == Organisation.id)
            .join(User, on=User.id == UserOrg.user_id)
            .where(Organisation.webhook_enabled, Organisation.email_notifications_enabled)
            .where(User.orcid_updated_at >= previous_first, User.orcid_updated_at < first)
        ):
            send_orcid_update_summary.queue(o.id)
        return
    org = Organisation.select().where(Organisation.id == org_id).first()
    if org and org.webhook_enabled and org.email_notifications_enabled:
        updated_users = org.users.where(
            User.orcid_updated_at >= previous_first, User.orcid_updated_at < first
        )
        recipient = org.notification_email or (org.tech_contact.name, org.tech_contact.email)
        if updated_users.exists():
            # NB: "following" was previously misspelled in this user-facing message:
            message_template = """<p>The following user profiles were updated
            from {{date_from}} until {{date_to}}:</p>
            <ul>
            {% for u in updated_users %}
            <li>{{u.name}} ({{u.email}},
            <a href="{{orcid_base_url}}{{u.orcid}}" target="_blank">{{u.orcid}}</a>,
            updated at {{u.orcid_updated_at.isoformat(sep=" ", timespec="seconds")}});
            </li>
            {% endfor %}
            </ul>
            """
            set_server_name()
            with app.app_context():
                send_email(
                    message_template,
                    org=org,
                    recipient=recipient,
                    subject="Updated ORCID Profiles",
                    date_from=previous_first,
                    date_to=previous_last,
                    updated_users=updated_users,
                    orcid_base_url=app.config["ORCID_BASE_URL"],
                )
@rq.job(timeout=300)
def sync_profile(task_id, delay=0.1):
    """Verify and sync the user profile.

    For every user of the task's organisation that has an ORCID iD and an
    access token with the "/activities/update" scope, synchronize the user's
    ORCID profile. A ``delay`` (seconds) pause between users throttles the
    ORCID API usage. Progress is written to the task log.
    """
    if not task_id:
        return
    try:
        task = Task.get(task_id)
    except Task.DoesNotExist:
        # The task may have been deleted since the job was queued:
        return
    org = task.org
    # Profile sync requires the organisation's disambiguated ID:
    if not org.disambiguated_id:
        return
    api = orcid_client.MemberAPIV3(org=org)
    count = 0
    # Select each user together with an access token carrying the
    # "/activities/update" scope (attached via the column alias):
    for u in (
        task.org.users.select(User, OrcidToken.access_token.alias("access_token"))
        .where(User.orcid.is_null(False))
        .join(
            OrcidToken,
            on=(
                (OrcidToken.user_id == User.id) & OrcidToken.scopes.contains("/activities/update")
            ),
        )
        .objects()
    ):
        Log.create(task=task_id, message=f"Processing user {u} / {u.orcid} profile.")
        api.sync_profile(user=u, access_token=u.access_token, task=task)
        count += 1
        time.sleep(delay)
    Log.create(task=task_id, message=f"In total, {count} user profiles were synchronized.")
class SafeRepresenterWithISODate(SafeRepresenter):
    """YAML representer that renders datetime values in ISO 8601 format."""

    def represent_datetime(self, data):
        """Render *data* as an ISO 8601 timestamp scalar (second precision)."""
        return self.represent_scalar(
            "tag:yaml.org,2002:timestamp", data.isoformat(timespec="seconds")
        )
def dump_yaml(data):
    """Dump the objects into YAML representation.

    Datetimes are rendered in ISO 8601 format and defaultdicts as plain
    mappings; non-ASCII characters are emitted verbatim.
    """
    yaml.add_representer(datetime, SafeRepresenterWithISODate.represent_datetime, Dumper=Dumper)
    yaml.add_representer(defaultdict, SafeRepresenter.represent_dict)
    return yaml.dump(data, allow_unicode=True)
def enqueue_user_records(user):
    """Enqueue all active and not yet processed record related to the user.

    Scans every incomplete (non-SYNC) task and queues the matching record
    processor for each record that targets the given user. Matching is done
    by email and/or ORCID iD; for funding, peer-review, work, and raw
    resource tasks the match goes through the task's invitee model.
    """
    for task in list(
        Task.select().where(Task.completed_at.is_null(), Task.task_type != TaskType.SYNC)
    ):
        # Resolve the per-task-type processor, e.g. process_funding_records:
        func = globals().get(f"process_{task.task_type.name.lower()}_records")
        records = task.records.where(
            task.record_model.is_active, task.record_model.processed_at.is_null()
        )
        # Narrow the records down to the ones addressed to this user:
        if task.task_type == TaskType.FUNDING:
            records = records.join(FundingInvitee).where(
                (FundingInvitee.email.is_null() | (FundingInvitee.email == user.email)),
                (FundingInvitee.orcid.is_null() | (FundingInvitee.orcid == user.orcid)),
            )
        elif task.task_type == TaskType.PEER_REVIEW:
            records = records.join(PeerReviewInvitee).where(
                (PeerReviewInvitee.email.is_null() | (PeerReviewInvitee.email == user.email)),
                (PeerReviewInvitee.orcid.is_null() | (PeerReviewInvitee.orcid == user.orcid)),
            )
        elif task.task_type == TaskType.WORK:
            records = records.join(WorkInvitee).where(
                (WorkInvitee.email.is_null() | (WorkInvitee.email == user.email)),
                (WorkInvitee.orcid.is_null() | (WorkInvitee.orcid == user.orcid)),
            )
        elif task.task_type == TaskType.RESOURCE and task.is_raw:
            # Raw resource records reach their invitees via the RecordInvitee link table:
            invitee_model = task.record_model.invitees.rel_model
            records = (
                records.join(RecordInvitee)
                .join(Invitee)
                .where(
                    (invitee_model.email.is_null() | (invitee_model.email == user.email)),
                    (invitee_model.orcid.is_null() | (invitee_model.orcid == user.orcid)),
                )
            )
        else:
            # Affiliation/property/other-id records carry email/orcid directly:
            records = records.where(
                (task.record_model.email.is_null() | (task.record_model.email == user.email)),
                (task.record_model.orcid.is_null() | (task.record_model.orcid == user.orcid)),
            )
        record_ids = [r.id for r in records]
        if record_ids:
            if task.task_type == TaskType.AFFILIATION:
                # Affiliation records are processed in a single batched job:
                func.queue(record_id=record_ids)
            else:
                for record_id in record_ids:
                    func.queue(record_id=record_id)
def enqueue_task_records(task):
    """Enqueue all active and not yet processed record.

    Raw tasks are queued as a single batch; affiliation and property records
    are queued in per-user chunks; everything else is queued per record.
    """
    records = task.records.where(
        task.record_model.is_active, task.record_model.processed_at.is_null()
    )
    if task.is_raw:
        return process_message_records.queue(record_id=[r.id for r in records])
    # Resolve the per-task-type processor, e.g. process_work_records:
    func = globals().get(f"process_{task.task_type.name.lower()}_records")
    if task.task_type in [TaskType.AFFILIATION, TaskType.PROPERTY]:
        # Group records of the same user into a single job; the query is
        # ordered by the same key that groupby() groups on, as it requires:
        records = records.order_by(task.record_model.email, task.record_model.orcid)
        for _, chunk in groupby(records, lambda r: (r.email, r.orcid)):
            func.queue(record_id=[r.id for r in chunk])
    else:
        for r in records:
            func.queue(record_id=r.id)
def activate_all_records(task):
    """Activate all submitted task records and enqueue it for processing.

    Returns:
        int: the number of records that were activated.

    Raises:
        Re-raises any failure after rolling back the transaction.
    """
    with db.atomic():
        try:
            status = "The record was activated at " + datetime.now().isoformat(timespec="seconds")
            # Flip only the records that are not active yet:
            count = (
                task.record_model.update(is_active=True, status=status)
                .where(
                    task.record_model.task == task,
                    (
                        task.record_model.is_active.is_null()
                        | (task.record_model.is_active == False)  # noqa: E712
                    ),
                )
                .execute()
            )  # noqa: E712
            task.status = "ACTIVE"
            task.save()
            enqueue_task_records(task)
        except:
            # NOTE(review): bare except — rolls back and re-raises everything.
            db.rollback()
            app.logger.exception("Failed to activate the selected records")
            raise
    return count
def reset_all_records(task):
    """Batch reset of batch records.

    Marks all active records (and their invitees, where present) of the task
    as unprocessed so they can be picked up again, deletes related user
    invitations, and re-enqueues the task records.

    Returns:
        int: the number of records that were reset.
    """
    count = 0
    with db.atomic():
        try:
            status = "The record was reset at " + datetime.now().isoformat(timespec="seconds")
            tt = task.task_type
            if tt in [TaskType.AFFILIATION, TaskType.PROPERTY, TaskType.OTHER_ID]:
                # These record types have no invitees — a bulk UPDATE suffices:
                count = (
                    task.record_model.update(processed_at=None, status=status)
                    .where(
                        task.record_model.task_id == task.id,
                        task.record_model.is_active == True,  # noqa: E712
                    )
                    .execute()
                )  # noqa: E712
            else:
                for record in task.records.where(
                    task.record_model.is_active == True  # noqa: E712
                ):  # noqa: E712
                    record.processed_at = None
                    record.status = status
                    if hasattr(record, "invitees"):
                        # Reset the record's invitees as well; raw tasks link
                        # invitees via IDs rather than a record foreign key:
                        invitee_class = record.invitees.model
                        invitee_class.update(processed_at=None, status=status).where(
                            (invitee_class.id << [i.id for i in record.invitees])
                            if task.is_raw
                            else (invitee_class.record == record.id)
                        ).execute()
                    record.save()
                    count = count + 1
            UserInvitation.delete().where(UserInvitation.task == task).execute()
            enqueue_task_records(task)
        except:
            # NOTE(review): bare except — rolls back and re-raises everything.
            db.rollback()
            app.logger.exception("Failed to reset the selected records")
            raise
        else:
            # Clear expiration state so the reset task gets a fresh lifecycle:
            task.expires_at = None
            task.expiry_email_sent_at = None
            task.completed_at = None
            task.status = "RESET"
            task.save()
    return count
def plural(word):
    """Convert a regular noun to its regular plural form.

    The first matching suffix rule wins; if none matches, "s" is appended.
    This is a heuristic: irregular nouns (and some regular ones, e.g.
    "bus" or "criterion") may be pluralized incorrectly.

    Args:
        word (str): the singular noun.

    Returns:
        str: the pluralized noun.
    """
    # Ordered (suffix(es), transform) rules; order matters, e.g. "ion"
    # must be tested before "on", and "fe" before "f".
    rules = (
        ("fe", lambda w: w[:-2] + "ves"),  # knife -> knives
        ("f", lambda w: w[:-1] + "ves"),  # wolf -> wolves
        ("o", lambda w: w + "es"),  # potato -> potatoes
        ("us", lambda w: w[:-2] + "i"),  # cactus -> cacti
        ("ion", lambda w: w + "s"),  # action -> actions
        ("on", lambda w: w[:-2] + "a"),  # phenomenon -> phenomena
        ("y", lambda w: w[:-1] + "ies"),  # community -> communities
        (("s", "x", "sh", "ch"), lambda w: w + "es"),  # box -> boxes, dish -> dishes
        ("an", lambda w: w[:-2] + "en"),  # woman -> women
    )
    for suffix, transform in rules:
        if word.endswith(suffix):
            return transform(word)
    return word + "s"
| 37.647445 | 119 | 0.528808 | # -*- coding: utf-8 -*-
"""Various utilities."""
import json
import logging
import os
import random
import string
import time
from collections import defaultdict
from datetime import date, datetime, timedelta
from itertools import filterfalse, groupby
from urllib.parse import quote, urlencode, urlparse
import emails
import flask
import requests
import yaml
from flask import request, url_for
from flask_login import current_user
from html2text import html2text
from jinja2 import Template
from peewee import JOIN
from yaml.dumper import Dumper
from yaml.representer import SafeRepresenter
from orcid_api_v3.rest import ApiException
from . import app, db, orcid_client, rq
from .models import (
AFFILIATION_TYPES,
Affiliation,
AffiliationRecord,
Delegate,
FundingInvitee,
FundingRecord,
Invitee,
Log,
MailLog,
MessageRecord,
NestedDict,
OrcidApiCall,
OrcidToken,
Organisation,
OrgInvitation,
OtherIdRecord,
PartialDate,
PeerReviewExternalId,
PeerReviewInvitee,
PeerReviewRecord,
PropertyRecord,
RecordInvitee,
ResourceRecord,
Role,
Task,
TaskType,
User,
UserInvitation,
UserOrg,
WorkInvitee,
WorkRecord,
get_val,
readup_file,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
EDU_CODES = {"student", "edu", "education"}
EMP_CODES = {"faculty", "staff", "emp", "employment"}
DST_CODES = {"distinction", "dist", "dst"}
INV_POS_CODES = {"invited-position", "position"}
QUA_CODES = {"qualification", "qua"}
MEM_CODES = {"membership", "mem"}
SER_CODES = {"service", "ser"}
ENV = app.config.get("ENV")
EXTERNAL_SP = app.config.get("EXTERNAL_SP")
def get_next_url(endpoint=None):
"""Retrieve and sanitize next/return URL."""
_next = (
request.args.get("next")
or request.args.get("_next")
or request.args.get("url")
or request.referrer
)
if not _next and endpoint:
_next = url_for(endpoint)
if _next:
if _next.startswith("/"):
return _next
try:
csrf = urlparse(_next).netloc
if (
csrf == urlparse(app.config.get("APP_URL")).netloc
or csrf.startswith("127.0.")
or csrf in app.config.get("CSRF_DOMAINS")
):
return _next
except:
pass
try:
if Delegate.select().where(Delegate.hostname ** f"%{urlparse(_next).netloc}%").exists():
return _next
except:
pass
return None
def is_valid_url(url):
"""Validate URL (expexted to have a path)."""
try:
result = urlparse(url)
return result.scheme and result.netloc and (result.path or result.path == "")
except:
return False
def read_uploaded_file(form):
"""Read up the whole content and deconde it and return the whole content."""
if "file_" not in request.files:
return
content = readup_file(request.files[form.file_.name])
if content:
return content
raise ValueError("Unable to decode encoding.")
def send_email(
template,
recipient,
cc_email=None,
sender=(app.config.get("APP_NAME"), app.config.get("MAIL_DEFAULT_SENDER")),
reply_to=app.config.get("MAIL_SUPPORT_ADDRESS"),
subject=None,
base=None,
logo=None,
org=None,
**kwargs,
):
"""Send an email, acquiring its payload by rendering a jinja2 template.
:type template: :class:`str`
:param subject: the subject of the email
:param base: the base template of the email messagess
:param template: name of the template file in ``templates/emails`` to use
:type recipient: :class:`tuple` (:class:`str`, :class:`str`)
:param recipient: 'To' (name, email) or just an email address
:type sender: :class:`tuple` (:class:`str`, :class:`str`)
:param sender: 'From' (name, email)
:param org: organisation on which behalf the email is sent
* `recipient` and `sender` are made available to the template as variables
* In any email tuple, name may be ``None``
* The subject is retrieved from a sufficiently-global template variable;
typically set by placing something like
``{% set subject = "My Subject" %}``
at the top of the template used (it may be inside some blocks
(if, elif, ...) but not others (rewrap, block, ...).
If it's not present, it defaults to "My Subject".
* With regards to line lengths: :class:`email.mime.text.MIMEText` will
(at least, in 2.7) encode the body of the text in base64 before sending
it, text-wrapping the base64 data. You will therefore not have any
problems with SMTP line length restrictions, and any concern to line
lengths is purely aesthetic or to be nice to the MUA.
:class:`RewrapExtension` may be used to wrap blocks of text nicely.
Note that ``{{ variables }}`` in manually wrapped text can cause
problems!
"""
if not org and current_user and not current_user.is_anonymous:
org = current_user.organisation
app = flask.current_app
jinja_env = flask.current_app.jinja_env
if logo is None:
if org and org.logo:
logo = url_for("logo_image", token=org.logo.token, _external=True)
else:
logo = url_for("static", filename="images/banner-small.png", _external=True)
if not base and org:
if org.email_template_enabled and org.email_template:
base = org.email_template
if not base:
base = app.config.get("DEFAULT_EMAIL_TEMPLATE")
jinja_env = jinja_env.overlay(autoescape=False)
def _jinja2_email(name, email):
if name is None:
hint = "name was not set for email {0}".format(email)
name = jinja_env.undefined(name="name", hint=hint)
return {"name": name, "email": email}
if "\n" not in template and template.endswith(".html"):
template = jinja_env.get_template(template)
else:
template = Template(template)
kwargs["sender"] = _jinja2_email(*sender)
if isinstance(recipient, str):
recipient = (recipient, recipient)
kwargs["recipient"] = _jinja2_email(*recipient)
if subject is not None:
kwargs["subject"] = subject
rendered = template.make_module(vars=kwargs)
if subject is None:
subject = getattr(rendered, "subject", "Welcome to the NZ ORCID Hub")
html_msg = base.format(
EMAIL=kwargs["recipient"]["email"],
SUBJECT=subject,
MESSAGE=str(rendered),
LOGO=logo,
BASE_URL=url_for("index", _external=True)[:-1],
INCLUDED_URL=kwargs.get("invitation_url", "") or kwargs.get("include_url", ""),
)
plain_msg = html2text(html_msg)
msg = emails.html(
subject=subject,
mail_from=(app.config.get("APP_NAME", "ORCID Hub"), app.config.get("MAIL_DEFAULT_SENDER")),
html=html_msg,
text=plain_msg,
)
dkim_key_path = app.config.get("DKIM_KEY_PATH")
dkim_domain = app.config.get("MAIL_DKIM_DOMAIN")
dkim_selector = app.config.get("MAIL_DKIM_SELECTOR")
if dkim_key_path and os.path.exists(dkim_key_path):
with open(dkim_key_path) as key_file:
msg.dkim(key=key_file, domain=dkim_domain, selector=dkim_selector)
elif dkim_key_path:
raise Exception(f"Cannot find DKIM key file: {dkim_key_path}!")
if cc_email:
msg.cc.append(cc_email)
msg.mail_to.append(recipient)
# Unsubscribe link:
token = new_invitation_token(length=10)
unsubscribe_url = url_for("unsubscribe", token=token, _external=True)
headers = {
"x-auto-response-suppress": "DR, RN, NRN, OOF",
"auto-submitted": "auto-generated",
"List-Unsubscribe": f"<{unsubscribe_url}>",
}
if reply_to:
headers["reply-to"] = reply_to
msg.set_headers(headers)
smtp = dict(host=app.config["MAIL_SERVER"], port=app.config["MAIL_PORT"])
if "MAIL_PORT" in app.config:
smtp["port"] = app.config["MAIL_PORT"]
if "MAIL_USE_TLS" in app.config:
smtp["tls"] = app.config["MAIL_USE_TLS"]
if "MAIL_USERNAME" in app.config:
smtp["user"] = app.config["MAIL_USERNAME"]
if "MAIL_PASSWORD" in app.config:
smtp["password"] = app.config["MAIL_PASSWORD"]
resp = msg.send(smtp=smtp)
MailLog.create(
org=org,
recipient=recipient[1],
sender=sender[1],
subject=subject,
was_sent_successfully=resp.success,
error=resp.error,
token=token,
)
if not resp.success:
raise Exception(
f"Failed to email the message: {resp.error}. Please contact a Hub administrator!"
)
def new_invitation_token(length=5):
"""Generate a unique invitation token."""
while True:
token = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
if not (
UserInvitation.select().where(UserInvitation.token == token).exists()
or OrgInvitation.select().where(OrgInvitation.token == token).exists()
or MailLog.select().where(MailLog.token == token).exists()
):
break
return token
def append_qs(url, **qs):
"""Append new query strings to an arbitraty URL."""
return url + ("&" if urlparse(url).query else "?") + urlencode(qs, doseq=True)
def track_event(category, action, label=None, value=0):
"""Track application events with Google Analytics."""
ga_tracking_id = app.config.get("GA_TRACKING_ID")
if not ga_tracking_id:
return
data = {
"v": "1", # API Version.
"tid": ga_tracking_id, # Tracking ID / Property ID.
# Anonymous Client Identifier. Ideally, this should be a UUID that
# is associated with particular user, device, or browser instance.
"cid": current_user.uuid,
"t": "event", # Event hit type.
"ec": category, # Event category.
"ea": action, # Event action.
"el": label, # Event label.
"ev": value, # Event value, must be an integer
}
response = requests.post("http://www.google-analytics.com/collect", data=data)
# If the request fails, this will raise a RequestException. Depending
# on your application's needs, this may be a non-error and can be caught
# by the caller.
response.raise_for_status()
# Returning response only for test, but can be used in application for some other reasons
return response
def set_server_name():
"""Set the server name for batch processes."""
if not app.config.get("SERVER_NAME"):
if EXTERNAL_SP:
app.config["SERVER_NAME"] = "127.0.0.1:5000"
else:
app.config["SERVER_NAME"] = (
"orcidhub.org.nz" if ENV == "prod" else ENV + ".orcidhub.org.nz"
)
def is_org_rec(org, rec):
"""Test if the record was authoritized by the organisation."""
client_id = org.orcid_client_id
source_client_id = rec.get("source").get("source-client-id")
return source_client_id and source_client_id.get("path") == client_id
def create_or_update_work(user, org_id, records, *args, **kwargs):
"""Create or update work record of a user."""
records = list(unique_everseen(records, key=lambda t: t.record.id))
org = Organisation.get(id=org_id)
api = orcid_client.MemberAPIV3(org, user)
profile_record = api.get_record()
if profile_record:
activities = profile_record.get("activities-summary")
works = []
for r in activities.get("works").get("group"):
ws = r.get("work-summary")[0]
if is_org_rec(org, ws):
works.append(ws)
taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code}
def match_put_code(records, record, invitee):
"""Match and assign put-code to a single work record and the existing ORCID records."""
if invitee.put_code:
return
for r in records:
put_code = r.get("put-code")
if put_code in taken_put_codes:
continue
if (
record.title
and record.type
and (r.get("title", "title", "value", default="") or "").lower()
== record.title.lower()
and (r.get("type", default="") or "").lower() == record.type.lower()
):
invitee.put_code = put_code
invitee.visibility = r.get("visibility")
invitee.save()
taken_put_codes.add(put_code)
app.logger.debug(
f"put-code {put_code} was asigned to the work record "
f"(ID: {record.id}, Task ID: {record.task_id})"
)
break
for task_by_user in records:
wr = task_by_user.record
wi = task_by_user.record.invitee
match_put_code(works, wr, wi)
for task_by_user in records:
wi = task_by_user.record.invitee
try:
put_code, orcid, created, visibility = api.create_or_update_work(task_by_user)
if created:
wi.add_status_line("Work record was created.")
else:
wi.add_status_line("Work record was updated.")
wi.orcid = orcid
wi.put_code = put_code
if wi.visibility != visibility:
wi.visibility = visibility
except Exception as ex:
logger.exception(f"For {user} encountered exception")
exception_msg = json.loads(ex.body) if hasattr(ex, "body") else str(ex)
wi.add_status_line(f"Exception occured processing the record: {exception_msg}.")
wr.add_status_line(
f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}."
)
finally:
wi.processed_at = datetime.utcnow()
wr.save()
wi.save()
else:
# TODO: Invitation resend in case user revokes organisation permissions
app.logger.debug("Should resend an invite to the researcher asking for permissions")
return
def create_or_update_peer_review(user, org_id, records, *args, **kwargs):
"""Create or update peer review record of a user."""
records = list(unique_everseen(records, key=lambda t: t.record.id))
org = Organisation.get(id=org_id)
api = orcid_client.MemberAPIV3(org, user)
profile_record = api.get_record()
if profile_record:
peer_reviews = [
s
for ag in profile_record.get("activities-summary", "peer-reviews", "group", default=[])
for pg in ag.get("peer-review-group", default=[])
for s in pg.get("peer-review-summary", default=[])
if is_org_rec(org, s)
]
taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code}
def match_put_code(records, record, invitee, taken_external_id_values):
"""Match and assign put-code to a single peer review record and the existing ORCID records."""
if invitee.put_code:
return
for r in records:
put_code = r.get("put-code")
external_id_value = (
r.get("external-ids").get("external-id")[0].get("external-id-value")
if r.get("external-ids")
and r.get("external-ids").get("external-id")
and r.get("external-ids").get("external-id")[0].get("external-id-value")
else None
)
if put_code in taken_put_codes:
continue
if (
record.review_group_id
and external_id_value in taken_external_id_values
and (r.get("review-group-id", default="") or "").lower()
== record.review_group_id.lower()
): # noqa: E127
invitee.put_code = put_code
invitee.save()
taken_put_codes.add(put_code)
app.logger.debug(
f"put-code {put_code} was asigned to the peer review record "
f"(ID: {record.id}, Task ID: {record.task_id})"
)
break
for task_by_user in records:
pr = task_by_user.record
pi = pr.invitee
external_ids = PeerReviewExternalId.select().where(
PeerReviewExternalId.record_id == pr.id
)
taken_external_id_values = {ei.value for ei in external_ids if ei.value}
match_put_code(peer_reviews, pr, pi, taken_external_id_values)
for task_by_user in records:
pr = task_by_user.record
pi = pr.invitee
try:
put_code, orcid, created, visibility = api.create_or_update_peer_review(
task_by_user
)
if created:
pi.add_status_line("Peer review record was created.")
else:
pi.add_status_line("Peer review record was updated.")
pi.orcid = orcid
pi.put_code = put_code
if pi.visibility != visibility:
pi.visibility = visibility
except Exception as ex:
logger.exception(f"For {user} encountered exception")
exception_msg = json.loads(ex.body) if hasattr(ex, "body") else str(ex)
pi.add_status_line(f"Exception occured processing the record: {exception_msg}.")
pr.add_status_line(
f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}."
)
finally:
pi.processed_at = datetime.utcnow()
pr.save()
pi.save()
else:
# TODO: Invitation resend in case user revokes organisation permissions
app.logger.debug("Should resend an invite to the researcher asking for permissions")
return
def create_or_update_funding(user, org_id, records, *args, **kwargs):
"""Create or update funding record of a user."""
records = list(unique_everseen(records, key=lambda t: t.record.id))
org = Organisation.get(org_id)
api = orcid_client.MemberAPIV3(org, user)
profile_record = api.get_record()
if profile_record:
activities = profile_record.get("activities-summary")
fundings = []
for r in activities.get("fundings").get("group"):
fs = r.get("funding-summary")[0]
if is_org_rec(org, fs):
fundings.append(fs)
taken_put_codes = {r.record.invitee.put_code for r in records if r.record.invitee.put_code}
def match_put_code(records, record, invitee):
"""Match and asign put-code to a single funding record and the existing ORCID records."""
if invitee.put_code:
return
for r in records:
put_code = r.get("put-code")
if put_code in taken_put_codes:
continue
if (
record.title
and record.type
and record.org_name
and (r.get("title", "title", "value", default="") or "").lower()
== record.title.lower()
and (r.get("type", default="") or "").lower() == record.type.lower()
and (r.get("organization", "name", default="") or "").lower()
== record.org_name.lower()
):
invitee.put_code = put_code
invitee.visibility = r.get("visibility")
invitee.save()
taken_put_codes.add(put_code)
app.logger.debug(
f"put-code {put_code} was asigned to the funding record "
f"(ID: {record.id}, Task ID: {record.task_id})"
)
break
for task_by_user in records:
fr = task_by_user.record
fi = task_by_user.record.invitee
match_put_code(fundings, fr, fi)
for task_by_user in records:
fi = task_by_user.record.invitee
try:
put_code, orcid, created, visibility = api.create_or_update_funding(task_by_user)
if created:
fi.add_status_line("Funding record was created.")
else:
fi.add_status_line("Funding record was updated.")
fi.orcid = orcid
fi.put_code = put_code
if fi.visibility != visibility:
fi.visibility = visibility
except Exception as ex:
logger.exception(f"For {user} encountered exception")
if ex and hasattr(ex, "body"):
exception_msg = json.loads(ex.body)
else:
exception_msg = str(ex)
fi.add_status_line(f"Exception occured processing the record: {exception_msg}.")
fr.add_status_line(
f"Error processing record. Fix and reset to enable this record to be processed: {exception_msg}."
)
finally:
fi.processed_at = datetime.utcnow()
fr.save()
fi.save()
else:
# TODO: Invitation resend in case user revokes organisation permissions
app.logger.debug("Should resend an invite to the researcher asking for permissions")
return
def create_or_update_resources(user, org_id, records, *args, **kwargs):
    """Create or update research resource records of a user.

    Fetches the user's existing research-resource groups from ORCID, matches
    them against the given task records by proposal external id so existing
    put-codes get reused, then PUTs updates or POSTs new records.  Requires
    an org/user token with the ``/activities/update`` scope; without one it
    only logs a reminder to re-invite the researcher.

    :param user: the User whose ORCID profile is updated.
    :param org_id: id of the Organisation on whose behalf the update is made.
    :param records: iterable of task rows (each carrying ``.record``) for this user.
    """
    # Drop duplicate task rows referring to the same underlying record:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(org_id)
    token = (
        OrcidToken.select(OrcidToken.access_token)
        .where(
            OrcidToken.user_id == user.id,
            OrcidToken.org_id == org.id,
            OrcidToken.scopes.contains("/activities/update"),
        )
        .first()
    )
    # Fix: guard against a missing token (the user may never have granted or may
    # have revoked access) -- previously this raised AttributeError on
    # ``token.access_token`` instead of falling through to the re-invite branch.
    resources = None
    if token:
        api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token)
        resources = api.get_resources()
    if resources:
        resources = resources.get("group")
        # Keep only the groups created by this organisation's client id:
        resources = [
            r
            for r in resources
            if any(
                rr.get("source", "source-client-id", "path") == org.orcid_client_id
                for rr in r.get("research-resource-summary")
            )
        ]
        # put-codes already claimed by one of the given records:
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}

        def match_record(records, record):
            """Match and assign put-code to the existing ORCID records."""
            if record.put_code:
                return record.put_code
            for r in records:
                # Skip groups sharing no external id value with the record's proposal:
                if all(
                    eid.get("external-id-value") != record.proposal_external_id_value
                    for eid in r.get("external-ids", "external-id")
                ):
                    continue
                for rr in r.get("research-resource-summary"):
                    put_code = rr.get("put-code")
                    # if all(eid.get("external-id-value") != record.external_id_value
                    #         for eid in rr.get("proposal", "external-ids", "external-id")):
                    #     continue
                    if put_code in taken_put_codes:
                        continue
                    record.put_code = put_code
                    if not record.visibility:
                        record.visibility = r.get("visibility")
                    if not record.display_index:
                        record.display_index = r.get("display-index")
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the other id record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    return put_code

        for t in records:
            try:
                rr = t.record
                put_code = match_record(resources, rr)
                if put_code:
                    resp = api.put(f"research-resource/{put_code}", rr.orcid_research_resource)
                else:
                    resp = api.post("research-resource", rr.orcid_research_resource)
                if resp.status == 201:
                    # New record: ORCID returns ".../{orcid}/research-resource/{put-code}":
                    orcid, put_code = resp.headers["Location"].split("/")[-3::2]
                    rr.add_status_line("ORCID record was created.")
                else:
                    orcid = user.orcid
                    rr.add_status_line("ORCID record was updated.")
                if not rr.put_code and put_code:
                    rr.put_code = int(put_code)
                if not rr.orcid and orcid:
                    rr.orcid = orcid
                visibility = (
                    json.loads(resp.data).get("visibility")
                    if hasattr(resp, "data") and resp.data
                    else None
                )
                if rr.visibility != visibility:
                    rr.visibility = visibility
            except ApiException as ex:
                if ex.status == 404:
                    rr.put_code = None
                elif ex.status == 401:
                    # The token was revoked -- discard it so an invite can be re-sent:
                    token.delete_instance()
                logger.exception(f"Exception occured {ex}")
                rr.add_status_line(f"ApiException: {ex}")
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                rr.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                rr.processed_at = datetime.utcnow()
                rr.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
    return
def create_or_update_record_from_messages(records, *args, **kwargs):
    """Create or update an ORCID record of a user from the given raw message.

    Each task row carries a JSON ``message`` (a research-resource payload);
    it is parsed into a :class:`NestedDict` and pushed to ORCID via PUT
    (when an existing put-code can be matched) or POST (new record).

    :param records: iterator with records for a single profile.
    """
    # Drop duplicate task rows referring to the same underlying record:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    if not records:
        return
    # add a few shortcuts:
    for r in records:
        r.record.msg = json.loads(r.record.message, object_pairs_hook=NestedDict)
        r.record.invitee = r.record.ri.invitee
    # All rows belong to one profile, so org/user/token come from the first row:
    rec0 = records[0]
    org = rec0.org
    user = rec0.record.invitee.user
    token = user.token
    api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token)
    resources = api.get_resources()
    if resources:
        resources = resources.get("group")
        # Keep only the resource groups created by this organisation's client id:
        resources = [
            r
            for r in resources
            if any(
                rr.get("source", "source-client-id", "path") == org.orcid_client_id
                for rr in r.get("research-resource-summary")
            )
        ]
        # put-codes already claimed by one of the given records:
        taken_put_codes = {
            r.record.ri.invitee.put_code for r in records if r.record.ri.invitee.put_code
        }

        def match_record(resource, record):
            """Match and assign put-code to the existing ORCID records."""
            put_code = record.invitee.put_code
            if put_code:
                # Record already has a put-code: only refresh the stored visibility.
                # NOTE(review): this loop reads the closed-over ``resources``, not
                # the ``resource`` parameter -- equivalent at the current call site.
                for rr in (rr for r in resources for rr in r.get("research-resource-summary")):
                    if rr.get("put-code") == put_code:
                        record.invitee.visibility = rr.get("visibility")
                        break
                return put_code
            external_ids = record.msg.get("proposal", "external-ids", "external-id", default=[])
            for r in resource:
                res_external_ids = r.get("external-ids", "external-id")
                # Skip groups sharing no external-id value with the record's proposal:
                if all(
                    eid.get("external-id-value") != rec_eid.get("external-id-value")
                    for eid in res_external_ids
                    for rec_eid in external_ids
                ):
                    continue
                for rr in r.get("research-resource-summary"):
                    proposal_external_ids = record.msg.get(
                        "proposal", "external-ids", "external-id"
                    )
                    # The summary's own proposal external ids must also overlap:
                    if all(
                        eid.get("external-id-value") != rec_eid.get("external-id-value")
                        for rec_eid in proposal_external_ids
                        for eid in rr.get("proposal", "external-ids", "external-id")
                    ):
                        continue
                    put_code = rr.get("put-code")
                    if put_code in taken_put_codes:
                        continue
                    record.invitee.put_code = put_code
                    record.ri.invitee.visibility = rr.get("visibility")
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    return put_code

        for t in records:
            try:
                rr = t.record
                if not rr.invitee.orcid:
                    rr.invitee.orcid = user.orcid
                put_code = match_record(resources, rr)
                # Strip any client-supplied visibility before sending --
                # presumably ORCID assigns/keeps it; TODO confirm against the API.
                if "visibility" in rr.msg:
                    del rr.msg["visibility"]
                if put_code:
                    rr.msg["put-code"] = put_code
                    resp = api.put(f"research-resource/{put_code}", rr.msg)
                else:
                    resp = api.post("research-resource", rr.msg)
                if resp.status == 201:
                    rr.invitee.add_status_line("ORCID record was created.")
                else:
                    rr.invitee.add_status_line("ORCID record was updated.")
                if not put_code:
                    # Newly created: take the put-code from the Location header and
                    # read the assigned visibility back from ORCID.
                    location = resp.headers["Location"]
                    rr.invitee.put_code = location.split("/")[-1]
                    rec = api.get(location)
                    rr.invitee.visibility = rec.json.get("visibility")
            except ApiException as ex:
                if ex.status == 404:
                    rr.invitee.put_code = None
                elif ex.status == 401:
                    # The token was revoked -- discard it:
                    token.delete_instance()
                logger.exception(f"Exception occured {ex}")
                rr.invitee.add_status_line(f"ApiException: {ex}")
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                rr.invitee.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                rr.invitee.processed_at = datetime.utcnow()
                rr.invitee.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
    return
@rq.job(timeout=300)
def send_user_invitation(
    inviter,
    org,
    email=None,
    first_name=None,
    last_name=None,
    user=None,
    task_id=None,
    task_type=None,
    affiliation_types=None,
    orcid=None,
    department=None,
    organisation=None,
    city=None,
    region=None,
    country=None,
    course_or_role=None,
    start_date=None,
    end_date=None,
    affiliations=Affiliation.NONE,
    disambiguated_id=None,
    disambiguation_source=None,
    cc_email=None,
    invitation_template=None,
    **kwargs,
):
    """Send an invitation to join ORCID Hub logging in via ORCID.

    Creates or updates the invited User and its UserOrg link, emails an
    invitation carrying a one-time token, records a UserInvitation row, and
    stamps the related task records with a "sent" status line.

    :param inviter: User submitting the invitation (or its integer id).
    :param org: Organisation on whose behalf it is sent (or its integer id).
    :param email: invitee address; falls back to ``user.email`` when missing.
    :param task_type: TaskType; defaults to the task's type, else AFFILIATION.
    :param affiliations: Affiliation flags; derived from ``affiliation_types``
        when not given explicitly.
    :param invitation_template: email template path; chosen per task type
        when not given.
    :return: the created UserInvitation.
    :raises Exception: when no email address can be determined; any other
        failure is logged and re-raised.
    """
    try:
        # Resolve the destination address (lower-cased for matching):
        if not email:
            if user and user.email:
                email = user.email
            else:
                raise Exception(
                    "Failed to find the email address for the record. Cannot send an invitation."
                )
        else:
            email = email.lower()
        # The rq job may be enqueued with plain ids instead of model instances:
        if isinstance(inviter, int):
            inviter = User.get(id=inviter)
        if isinstance(org, int):
            org = Organisation.get(id=org)
        # Dates may arrive serialised as lists of date parts:
        if isinstance(start_date, list):
            start_date = PartialDate(*start_date)
        if isinstance(end_date, list):
            end_date = PartialDate(*end_date)
        set_server_name()
        task_type = task_type or (Task.get(task_id).task_type if task_id else TaskType.AFFILIATION)
        if not invitation_template:
            if task_type != TaskType.AFFILIATION:
                invitation_template = f"email/{task_type.name.lower()}_invitation.html"
            else:
                invitation_template = "email/researcher_invitation.html"
        if task_type == TaskType.AFFILIATION:
            logger.info(
                f"*** Sending an invitation to '{first_name} {last_name} <{email}>' "
                f"submitted by {inviter} of {org} for affiliations: {affiliation_types}"
            )
        else:
            logger.info(
                f"*** Sending an invitation to '{first_name} <{email}>' "
                f"submitted by {inviter} of {org}"
            )
        email = email.lower()
        # Create the invitee user lazily; fill in only missing name parts:
        if not user or not user.id:
            user, user_created = User.get_or_create(email=email)
            if user_created:
                user.organisation = org
                user.created_by = inviter.id
                user.first_name = first_name or "N/A"
                user.last_name = last_name or "N/A"
            else:
                user.updated_by = inviter.id
        if first_name and not user.first_name:
            user.first_name = first_name
        if last_name and not user.last_name:
            user.last_name = last_name
        if not first_name:
            first_name = user.first_name
        if not last_name:
            last_name = user.last_name
        user.roles |= Role.RESEARCHER
        # One-time token embedded in the invitation link:
        token = new_invitation_token()
        with app.app_context():
            invitation_url = flask.url_for(
                "orcid_login",
                invitation_token=token,
                _external=True,
                _scheme="http" if app.debug else "https",
            )
            send_email(
                invitation_template,
                recipient=(org.name if org else user.organisation.name, user.email),
                reply_to=f"{inviter.name} <{inviter.email}>",
                cc_email=cc_email,
                invitation_url=invitation_url,
                org_name=org.name if org else user.organisation.name,
                org=org,
                user=user,
            )
        user.save()
        # Link the user to the organisation and derive affiliation flags:
        user_org, user_org_created = UserOrg.get_or_create(user=user, org=org)
        if user_org_created:
            user_org.created_by = inviter.id
            if not affiliations and affiliation_types:
                if affiliation_types & EMP_CODES:
                    affiliations |= Affiliation.EMP
                if affiliation_types & EDU_CODES:
                    affiliations |= Affiliation.EDU
            user_org.affiliations = affiliations
        else:
            user_org.updated_by = inviter.id
        user_org.save()
        ui = UserInvitation.create(
            task_id=task_id,
            invitee_id=user.id,
            inviter_id=inviter.id,
            org=org,
            email=email,
            first_name=first_name,
            last_name=last_name,
            orcid=orcid,
            department=department,
            organisation=organisation,
            city=city,
            region=region,
            country=country,
            course_or_role=course_or_role,
            start_date=start_date,
            end_date=end_date,
            affiliations=affiliations,
            disambiguated_id=disambiguated_id,
            disambiguation_source=disambiguation_source,
            token=token,
        )
        # Stamp the matching task records: append to a non-empty status,
        # set it outright where the status is still NULL.
        status = "The invitation sent at " + datetime.utcnow().isoformat(timespec="seconds")
        if task_type == TaskType.AFFILIATION:
            (
                AffiliationRecord.update(status=AffiliationRecord.status + "\n" + status)
                .where(
                    AffiliationRecord.status.is_null(False),
                    AffiliationRecord.task_id == task_id,
                    AffiliationRecord.email == email,
                )
                .execute()
            )
            (
                AffiliationRecord.update(status=status)
                .where(
                    AffiliationRecord.task_id == task_id,
                    AffiliationRecord.status.is_null(),
                    AffiliationRecord.email == email,
                )
                .execute()
            )
        elif task_type in [TaskType.FUNDING, TaskType.WORK, TaskType.PEER_REVIEW]:
            task = Task.get(task_id)
            for record in task.records.where(task.record_model.is_active):
                invitee_class = record.invitees.model
                invitee_class.update(status=status).where(
                    invitee_class.record == record.id, invitee_class.email == email
                ).execute()
        return ui
    except Exception as ex:
        logger.exception(f"Exception occured while sending mail {ex}")
        raise
def unique_everseen(iterable, key=None):
    """Yield elements of *iterable* in order, skipping any seen before.

    When *key* is given, two elements count as duplicates if their keys are
    equal.  Every element (or key) ever yielded is remembered, so memory use
    grows with the number of distinct values.  Based on the ``itertools``
    recipes from the Python documentation.

    >>> "".join(unique_everseen("AAAABBBCCDAABBB"))
    'ABCD'
    >>> "".join(unique_everseen("ABBCcAD", str.lower))
    'ABCD'
    """
    observed = set()
    if key is None:
        for item in iterable:
            if item not in observed:
                observed.add(item)
                yield item
    else:
        for item in iterable:
            marker = key(item)
            if marker not in observed:
                observed.add(marker)
                yield item
def create_or_update_properties(user, org_id, records, *args, **kwargs):
    """Create or update researcher property records of a user.

    Handles the "person" section properties -- researcher URLs, other names,
    keywords and countries/addresses.  Existing ORCID entries created by the
    organisation are matched by value (case-insensitively) so put-codes get
    reused; the member API then creates or updates each record.  Requires a
    token with the ``/person/update`` scope.

    :param user: the User whose ORCID profile is updated.
    :param org_id: id of the Organisation on whose behalf the update is made.
    :param records: iterable of task rows (each carrying ``.record``) for this user.
    """
    # Drop duplicate task rows referring to the same underlying record:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(org_id)
    profile_record = None
    token = (
        OrcidToken.select(OrcidToken.access_token)
        .where(
            OrcidToken.user_id == user.id,
            OrcidToken.org_id == org.id,
            OrcidToken.scopes.contains("/person/update"),
        )
        .first()
    )
    if token:
        api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token)
        profile_record = api.get_record()
    if profile_record:
        activities = profile_record.get("person")
        # Existing profile entries created by this organisation, per property kind:
        researcher_urls = [
            r
            for r in (activities.get("researcher-urls", "researcher-url", default=[]))
            if is_org_rec(org, r)
        ]
        other_names = [
            r
            for r in (activities.get("other-names", "other-name", default=[]))
            if is_org_rec(org, r)
        ]
        keywords = [
            r for r in (activities.get("keywords", "keyword", default=[])) if is_org_rec(org, r)
        ]
        countries = [
            r for r in (activities.get("addresses", "address", default=[])) if is_org_rec(org, r)
        ]
        # put-codes already claimed by one of the given records:
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}

        def match_put_code(record):
            """Match and assign put-code to the existing ORCID records."""
            # Pick the candidate list matching the record's property type:
            for r in (
                researcher_urls
                if record.type == "URL"
                else other_names
                if record.type == "NAME"
                else countries
                if record.type == "COUNTRY"
                else keywords
            ):
                try:
                    # "path" looks like ".../{orcid}/{section}/{put-code}":
                    orcid, put_code = r.get("path").split("/")[-3::2]
                except Exception:
                    app.logger.exception("Failed to get ORCID iD/put-code from the response.")
                    raise Exception("Failed to get ORCID iD/put-code from the response.")
                if record.put_code:
                    return
                if put_code in taken_put_codes:
                    continue
                # Case-insensitive, type-specific value comparison:
                if (
                    record.value
                    and (
                        record.name
                        and record.type == "URL"
                        and (r.get("url-name", default="") or "").lower() == record.name.lower()
                        and (r.get("url", "value", default="") or "").lower()
                        == record.value.lower()
                    )
                    or (
                        record.type in ["NAME", "KEYWORD"]
                        and (r.get("content", default="") or "").lower() == record.value.lower()
                    )
                    or (
                        record.type == "COUNTRY"
                        and (r.get("country", "value", default="") or "").lower()
                        == record.value.lower()
                    )
                ):  # noqa: E129
                    record.put_code = put_code
                    record.orcid = orcid
                    record.visibility = r.get("visibility")
                    if not record.display_index:
                        record.display_index = r.get("display-index")
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the record (ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break

        for rr in (t.record for t in records):
            try:
                match_put_code(rr)
                # Dispatch to the member-API call matching the property type:
                if rr.type == "URL":
                    put_code, orcid, created, visibility = api.create_or_update_researcher_url(
                        **rr.__data__
                    )
                elif rr.type == "NAME":
                    put_code, orcid, created, visibility = api.create_or_update_other_name(
                        **rr.__data__
                    )
                elif rr.type == "COUNTRY":
                    put_code, orcid, created, visibility = api.create_or_update_address(
                        **rr.__data__
                    )
                else:
                    put_code, orcid, created, visibility = api.create_or_update_keyword(
                        **rr.__data__
                    )
                if created:
                    rr.add_status_line(f"Researcher {rr.type} record was created.")
                else:
                    rr.add_status_line(f"Researcher {rr.type} record was updated.")
                rr.orcid = orcid
                rr.put_code = put_code
                if rr.visibility != visibility:
                    rr.visibility = visibility
            except ApiException as ex:
                if ex.status == 404:
                    rr.put_code = None
                elif ex.status == 401:
                    # The token was revoked -- discard it:
                    token.delete_instance()
                logger.exception(f"Exception occured {ex}")
                rr.add_status_line(f"ApiException: {ex}")
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                rr.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                rr.processed_at = datetime.utcnow()
                rr.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
    return
# TODO: delete
def create_or_update_other_id(user, org_id, records, *args, **kwargs):
    """Create or update Other ID (person external identifier) records of a user.

    Matches existing external-identifier entries created by the organisation
    by (type, value) -- case-insensitively -- so put-codes get reused, then
    creates or updates each record via the member API.  Requires a token with
    the ``/person/update`` scope.

    :param user: the User whose ORCID profile is updated.
    :param org_id: id of the Organisation on whose behalf the update is made.
    :param records: iterable of task rows (each carrying ``.record``) for this user.
    """
    # Drop duplicate task rows referring to the same underlying record:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(id=org_id)
    profile_record = None
    token = (
        OrcidToken.select(OrcidToken.access_token)
        .where(
            OrcidToken.user_id == user.id,
            OrcidToken.org_id == org.id,
            OrcidToken.scopes.contains("/person/update"),
        )
        .first()
    )
    if token:
        api = orcid_client.MemberAPIV3(org, user, access_token=token.access_token)
        profile_record = api.get_record()
    if profile_record:
        activities = profile_record.get("person")
        # Existing external identifiers created by this organisation:
        other_id_records = [
            r
            for r in (activities.get("external-identifiers").get("external-identifier"))
            if is_org_rec(org, r)
        ]
        # put-codes already claimed by one of the given records:
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}

        def match_put_code(records, record):
            """Match and assign put-code to the existing ORCID records."""
            for r in records:
                try:
                    # "path" looks like ".../{orcid}/{section}/{put-code}":
                    orcid, put_code = r.get("path").split("/")[-3::2]
                except Exception:
                    app.logger.exception("Failed to get ORCID iD/put-code from the response.")
                    raise Exception("Failed to get ORCID iD/put-code from the response.")
                if record.put_code:
                    return
                if put_code in taken_put_codes:
                    continue
                # Case-insensitive comparison of identifier type and value:
                if (
                    record.type
                    and record.value
                    and (r.get("external-id-type", default="") or "").lower()
                    == record.type.lower()
                    and (r.get("external-id-value", default="") or "").lower()
                    == record.value.lower()
                ):
                    record.put_code = put_code
                    record.orcid = orcid
                    record.visibility = r.get("visibility")
                    if not record.display_index:
                        record.display_index = r.get("display-index")
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the other id record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break

        for task_by_user in records:
            try:
                rr = task_by_user.record
                match_put_code(other_id_records, rr)
                put_code, orcid, created, visibility = api.create_or_update_person_external_id(
                    **rr.__data__
                )
                if created:
                    rr.add_status_line("Other ID record was created.")
                else:
                    rr.add_status_line("Other ID record was updated.")
                rr.orcid = orcid
                rr.put_code = put_code
                if rr.visibility != visibility:
                    rr.visibility = visibility
            except ApiException as ex:
                if ex.status == 404:
                    rr.put_code = None
                elif ex.status == 401:
                    # The token was revoked -- discard it:
                    token.delete_instance()
                logger.exception(f"Exception occured {ex}")
                rr.add_status_line(f"ApiException: {ex}")
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                rr.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                rr.processed_at = datetime.utcnow()
                rr.save()
    else:
        # TODO: Invitation resend in case user revokes organisation permissions
        app.logger.debug("Should resend an invite to the researcher asking for permissions")
    return
def create_or_update_affiliations(user, org_id, records, *args, **kwargs):
    """Create or update affiliation records of a user.

    1. Retrieves the user's education and employment (and other affiliation)
       summaries from ORCID;
    2. Matches the records with the summaries to reuse existing put-codes;
    3. If there is an exact match, leaves the ORCID record untouched;
    4. Otherwise creates or updates the record via the member API;
    5. When the profile cannot be accessed, re-sends an invitation instead.

    :param user: the User whose ORCID profile is updated.
    :param org_id: id of the Organisation on whose behalf the update is made.
    :param records: iterable of task rows (each carrying ``.record``) for this user.
    """
    # Drop duplicate task rows referring to the same underlying record:
    records = list(unique_everseen(records, key=lambda t: t.record.id))
    org = Organisation.get(id=org_id)
    api = orcid_client.MemberAPIV3(org, user)
    profile_record = api.get_record()
    orcid_affiliation_types = [
        "employment",
        "education",
        "distinction",
        "membership",
        "service",
        "qualification",
        "invited-position",
    ]
    if profile_record:
        # Affiliation summaries created by this organisation, keyed by type:
        affiliations = {
            at: [
                s.get(f"{at}-summary")
                for ag in profile_record.get(
                    "activities-summary", f"{at}s", "affiliation-group", default=[]
                )
                for s in ag.get("summaries", default=[])
                if is_org_rec(org, s.get(f"{at}-summary"))
            ]
            for at in orcid_affiliation_types
        }
        # put-codes already claimed by one of the given records:
        taken_put_codes = {r.record.put_code for r in records if r.record.put_code}
        put_codes = {at: [e["put-code"] for e in records] for at, records in affiliations.items()}

        def match_put_code(records, record):
            """Match and assign put-code to a single affiliation record.

            Returns True when an ORCID entry matches the record exactly (no
            API call needed); otherwise assigns the best partial-match
            put-code (if any) and returns None.
            """
            for r in records:
                try:
                    # "path" looks like ".../{orcid}/{affiliation-type}/{put-code}":
                    orcid, rec_type, put_code = r.get("path").split("/")[-3:]
                except Exception:
                    app.logger.exception("Failed to get ORCID iD/put-code from the response.")
                    raise Exception("Failed to get ORCID iD/put-code from the response.")
                start_date = record.start_date.as_orcid_dict() if record.start_date else None
                end_date = record.end_date.as_orcid_dict() if record.end_date else None
                # Exact match on every field -> the ORCID record is already up to date:
                if (
                    r.get("start-date") == start_date
                    and r.get("end-date") == end_date
                    and (rec_type == "education" or r.get("department-name") == record.department)
                    and r.get("role-title") == record.role
                    and get_val(r, "organization", "name") == record.organisation
                    and get_val(r, "organization", "address", "city") == record.city
                    and get_val(r, "organization", "address", "region") == record.region
                    and get_val(r, "organization", "address", "country") == record.country
                    and get_val(
                        r,
                        "organization",
                        "disambiguated-organization",
                        "disambiguated-organization-identifier",
                    )
                    == record.disambiguated_id
                    and get_val(
                        r, "organization", "disambiguated-organization", "disambiguation-source"
                    )
                    == record.disambiguation_source
                ):
                    record.put_code = put_code
                    record.orcid = orcid
                    record.visibility = r.get("visibility")
                    return True
                if record.put_code:
                    return
                if put_code in taken_put_codes:
                    continue
                if (
                    # pure vanilla:
                    (
                        r.get("start-date") is None
                        and r.get("end-date") is None
                        and r.get("department-name") is None
                        and r.get("role-title") is None
                    )
                    or
                    # partial match
                    (
                        (
                            # for 'edu' records department and start-date can be missing:
                            rec_type == "education"
                            or (
                                record.department
                                and r.get("start-date") == start_date
                                and r.get("department-name", default="").lower()
                                == record.department.lower()
                            )
                        )
                        and record.role
                        # Fix: the closing parenthesis was misplaced --
                        # ``r.get("role-title", default="".lower() == record.role.lower())``
                        # passed a boolean as the default and never compared the titles.
                        and (r.get("role-title", default="") or "").lower()
                        == record.role.lower()
                    )
                ):
                    record.visibility = r.get("visibility")
                    record.put_code = put_code
                    record.orcid = orcid
                    taken_put_codes.add(put_code)
                    app.logger.debug(
                        f"put-code {put_code} was asigned to the affiliation record "
                        f"(ID: {record.id}, Task ID: {record.task_id})"
                    )
                    break

        for task_by_user in records:
            try:
                ar = task_by_user.record
                at = ar.affiliation_type.lower() if ar.affiliation_type else None
                no_orcid_call = False
                # Deletion request: find which section holds the put-code and delete it.
                if ar.delete_record and profile_record:
                    try:
                        for at in orcid_affiliation_types:
                            if ar.put_code in put_codes[at]:
                                getattr(api, f"delete_{at}v3")(user.orcid, ar.put_code)
                                app.logger.info(
                                    f"ORCID record of {user} with put-code {ar.put_code} was deleted."
                                )
                                break
                        else:
                            ar.add_status_line(
                                f"There is no record with the given put-code {ar.put_code} in the user {user} profile."
                            )
                    except Exception as ex:
                        ar.add_status_line(f"Exception occured processing the record: {ex}.")
                    ar.processed_at = datetime.utcnow()
                    ar.save()
                    continue
                # Map the free-form affiliation type onto the Affiliation flags:
                if at in EMP_CODES:
                    affiliation = Affiliation.EMP
                elif at in DST_CODES:
                    affiliation = Affiliation.DST
                elif at in MEM_CODES:
                    affiliation = Affiliation.MEM
                elif at in SER_CODES:
                    affiliation = Affiliation.SER
                elif at in QUA_CODES:
                    affiliation = Affiliation.QUA
                elif at in INV_POS_CODES:
                    affiliation = Affiliation.POS
                elif at in EDU_CODES:
                    affiliation = Affiliation.EDU
                else:
                    logger.info(f"For {user} not able to determine affiliaton type with {org}")
                    # Fix: the message and the joined values were implicitly
                    # concatenated, turning the whole message into the join
                    # *separator*; concatenate explicitly instead.
                    ar.add_status_line(
                        f"Unsupported affiliation type '{at}' allowed values are: "
                        + ", ".join(AFFILIATION_TYPES)
                    )
                    ar.save()
                    continue
                no_orcid_call = match_put_code(affiliations.get(str(affiliation).lower()), ar)
                if no_orcid_call:
                    # Exact match found -- nothing to push to ORCID:
                    ar.add_status_line(f"{str(affiliation)} record unchanged.")
                else:
                    put_code, orcid, created, visibility = api.create_or_update_affiliation(
                        affiliation=affiliation, **ar.__data__
                    )
                    if created:
                        ar.add_status_line(f"{str(affiliation)} record was created.")
                    else:
                        ar.add_status_line(f"{str(affiliation)} record was updated.")
                    ar.orcid = orcid
                    ar.put_code = put_code
                    if ar.visibility != visibility:
                        ar.visibility = visibility
            except Exception as ex:
                logger.exception(f"For {user} encountered exception")
                ar.add_status_line(f"Exception occured processing the record: {ex}.")
            finally:
                ar.processed_at = datetime.utcnow()
                ar.save()
    else:
        # No accessible profile: re-invite every affected researcher.
        for task_by_user in records:
            user = User.get(email=task_by_user.record.email, organisation=task_by_user.org)
            user_org = UserOrg.get(user=user, org=task_by_user.org)
            token = new_invitation_token()
            with app.app_context():
                invitation_url = flask.url_for(
                    "orcid_login",
                    invitation_token=token,
                    _external=True,
                    _scheme="http" if app.debug else "https",
                )
                send_email(
                    "email/researcher_reinvitation.html",
                    recipient=(user.organisation.name, user.email),
                    reply_to=f"{task_by_user.created_by.name} <{task_by_user.created_by.email}>",
                    invitation_url=invitation_url,
                    org_name=user.organisation.name,
                    org=org,
                    user=user,
                )
            UserInvitation.create(
                invitee_id=user.id,
                inviter_id=task_by_user.created_by.id,
                org=org,
                email=user.email,
                first_name=user.first_name,
                last_name=user.last_name,
                orcid=user.orcid,
                organisation=org.name,
                city=org.city,
                region=org.region,
                country=org.country,
                start_date=task_by_user.record.start_date,
                end_date=task_by_user.record.end_date,
                affiliations=user_org.affiliations,
                disambiguated_id=org.disambiguated_id,
                disambiguation_source=org.disambiguation_source,
                token=token,
            )
            # Append to a non-empty status, set it outright where still NULL:
            status = "Exception occured while accessing user's profile. Hence, The invitation resent at "
            status += datetime.utcnow().isoformat(timespec="seconds")
            AffiliationRecord.update(status=AffiliationRecord.status + "\n" + status).where(
                AffiliationRecord.status.is_null(False), AffiliationRecord.email == user.email
            ).execute()
            AffiliationRecord.update(status=status).where(
                AffiliationRecord.status.is_null(), AffiliationRecord.email == user.email
            ).execute()
    return
@rq.job(timeout=300)
def process_work_records(max_rows=20, record_id=None):
    """Process uploaded work records.

    Selects up to ``max_rows`` active, unprocessed work records.  For users
    who have authorised the organisation (an ORCID token with the
    ``/activities/update`` scope exists) the records are pushed to ORCID;
    otherwise an invitation is sent.  Fully processed records and completed
    tasks are stamped, and a summary email is sent per completed task.

    :param max_rows: upper bound on the number of rows handled per run.
    :param record_id: when given, restrict processing to this single record.
    """
    set_server_name()
    task_ids = set()
    work_ids = set()
    """This query is to retrieve Tasks associated with work records, which are not processed but are active"""
    tasks = (
        Task.select(
            Task,
            WorkRecord,
            WorkInvitee,
            User,
            UserInvitation.id.alias("invitation_id"),
            OrcidToken,
        )
        .where(
            WorkRecord.processed_at.is_null(),
            WorkInvitee.processed_at.is_null(),
            WorkRecord.is_active,
            # Either a usable token exists, or no "sent" invitation yet:
            (
                OrcidToken.id.is_null(False)
                | (
                    (WorkInvitee.status.is_null())
                    | (WorkInvitee.status.contains("sent").__invert__())
                )
            ),
        )
        .join(WorkRecord, on=(Task.id == WorkRecord.task_id), attr="record")
        .join(WorkInvitee, on=(WorkRecord.id == WorkInvitee.record_id), attr="invitee")
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == WorkInvitee.email)
                | ((User.orcid == WorkInvitee.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=((UserInvitation.email == WorkInvitee.email) & (UserInvitation.task_id == Task.id)),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
    )
    if record_id:
        tasks = tasks.where(WorkRecord.id == record_id)
    # NOTE: groupby below relies on this ORDER BY -- rows for the same
    # (task, org, record, user) must be adjacent.
    tasks = tasks.order_by(Task.id, Task.org_id, WorkRecord.id, User.id).limit(max_rows)
    tasks = list(tasks)
    for (task_id, org_id, record_id, user), tasks_by_user in groupby(
        tasks,
        lambda t: (
            t.id,
            t.org_id,
            t.record.id,
            # Unmatched LEFT JOIN rows yield a blank User -- treat as None:
            (lambda r: r.user if r.user.id else None)(t.record.invitee),
        ),
    ):
        # If we have the token associated to the user then update the work record,
        # otherwise send him an invite
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            )
            .exists()
        ):  # noqa: E127, E129
            # One invitation per distinct (inviter, org, email, first, last) key.
            # NOTE(review): the loop variable ``tasks`` shadows the outer query list.
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.invitee.email,
                    t.record.invitee.first_name,
                    t.record.invitee.last_name,
                ),
            ):  # noqa: E501
                email = k[2]
                try:
                    send_user_invitation(*k, task_id=task_id)
                except Exception as ex:
                    # Mark the invitee as processed with the failure reason:
                    (
                        WorkInvitee.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            WorkInvitee.email == email,
                            WorkInvitee.record_id == record_id,
                            WorkInvitee.processed_at.is_null(),
                        )
                    ).execute()
        else:
            create_or_update_work(user, org_id, tasks_by_user)
        task_ids.add(task_id)
        work_ids.add(record_id)
    for record in WorkRecord.select().where(WorkRecord.id << work_ids):
        # The Work record is processed for all invitees
        if not (
            WorkInvitee.select()
            .where(WorkInvitee.record_id == record.id, WorkInvitee.processed_at.is_null())
            .exists()
        ):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("Work record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (
            WorkRecord.select()
            .where(WorkRecord.task_id == task.id, WorkRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            error_count = (
                WorkRecord.select()
                .where(WorkRecord.task_id == task.id, WorkRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            # Notify the task owner with a link to the JSON export:
            with app.app_context():
                export_url = flask.url_for(
                    "workrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                send_email(
                    "email/work_task_completed.html",
                    subject="Work Process Update",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    task_name="Work",
                    filename=task.filename,
                )
@rq.job(timeout=300)
def process_peer_review_records(max_rows=20, record_id=None):
    """Process uploaded peer_review records.

    Same pipeline as :func:`process_work_records`, for peer-review records:
    push to ORCID for users with a ``/activities/update`` token, invite the
    rest, then stamp processed records/completed tasks and email a summary.

    :param max_rows: upper bound on the number of rows handled per run.
    :param record_id: when given, restrict processing to this single record.
    """
    set_server_name()
    task_ids = set()
    peer_review_ids = set()
    """This query is to retrieve Tasks associated with peer review records, which are not processed but are active"""
    tasks = (
        Task.select(
            Task,
            PeerReviewRecord,
            PeerReviewInvitee,
            User,
            UserInvitation.id.alias("invitation_id"),
            OrcidToken,
        )
        .where(
            PeerReviewRecord.processed_at.is_null(),
            PeerReviewInvitee.processed_at.is_null(),
            PeerReviewRecord.is_active,
            # Either a usable token exists, or no "sent" invitation yet:
            (
                OrcidToken.id.is_null(False)
                | (
                    (PeerReviewInvitee.status.is_null())
                    | (PeerReviewInvitee.status.contains("sent").__invert__())
                )
            ),
        )
        .join(PeerReviewRecord, on=(Task.id == PeerReviewRecord.task_id), attr="record")
        .join(
            PeerReviewInvitee,
            on=(PeerReviewRecord.id == PeerReviewInvitee.record_id),
            attr="invitee",
        )
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == PeerReviewInvitee.email)
                | ((User.orcid == PeerReviewInvitee.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (UserInvitation.email == PeerReviewInvitee.email)
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
    )
    if record_id:
        tasks = tasks.where(PeerReviewRecord.id == record_id)
    # NOTE: groupby below relies on this ORDER BY -- rows for the same
    # (task, org, record, user) must be adjacent.
    tasks = tasks.order_by(Task.id, Task.org_id, PeerReviewRecord.id, User.id).limit(max_rows)
    tasks = list(tasks)
    for (task_id, org_id, record_id, user), tasks_by_user in groupby(
        tasks,
        lambda t: (
            t.id,
            t.org_id,
            t.record.id,
            # Unmatched LEFT JOIN rows yield a blank User -- treat as None:
            (lambda r: r.user if r.user.id else None)(t.record.invitee),
        ),
    ):
        """If we have the token associated to the user then update the peer record, otherwise send him an invite"""
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            )
            .exists()
        ):  # noqa: E127, E129
            # One invitation per distinct (inviter, org, email, first, last) key.
            # NOTE(review): the loop variable ``tasks`` shadows the outer query list.
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.invitee.email,
                    t.record.invitee.first_name,
                    t.record.invitee.last_name,
                ),
            ):  # noqa: E501
                email = k[2]
                try:
                    send_user_invitation(*k, task_id=task_id)
                except Exception as ex:
                    # Mark the invitee as processed with the failure reason:
                    (
                        PeerReviewInvitee.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            PeerReviewInvitee.email == email,
                            PeerReviewInvitee.record_id == record_id,
                            PeerReviewInvitee.processed_at.is_null(),
                        )
                    ).execute()
        else:
            create_or_update_peer_review(user, org_id, tasks_by_user)
        task_ids.add(task_id)
        peer_review_ids.add(record_id)
    for record in PeerReviewRecord.select().where(PeerReviewRecord.id << peer_review_ids):
        # The Peer Review record is processed for all invitees
        if not (
            PeerReviewInvitee.select()
            .where(
                PeerReviewInvitee.record_id == record.id, PeerReviewInvitee.processed_at.is_null()
            )
            .exists()
        ):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("Peer Review record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (
            PeerReviewRecord.select()
            .where(PeerReviewRecord.task_id == task.id, PeerReviewRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            error_count = (
                PeerReviewRecord.select()
                .where(PeerReviewRecord.task_id == task.id, PeerReviewRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            # Notify the task owner with a link to the JSON export:
            with app.app_context():
                export_url = flask.url_for(
                    "peerreviewrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                send_email(
                    "email/work_task_completed.html",
                    subject="Peer Review Process Update",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    task_name="Peer Review",
                    filename=task.filename,
                )
@rq.job(timeout=300)
def process_funding_records(max_rows=20, record_id=None):
    """Process uploaded funding records.

    Retrieves active, not yet processed funding records (at most *max_rows*).
    Invitees without an ORCID "/activities/update" access token get an
    invitation e-mail; for the rest the funding entry is created or updated
    in their ORCID profile.  Fully handled records and tasks are
    time-stamped as processed/completed, and a summary e-mail is sent to the
    task creator for each completed task.

    Args:
        max_rows (int): The maximum number of rows processed in one go.
        record_id (int): Optional FundingRecord ID to restrict the run to.
    """
    set_server_name()
    task_ids = set()
    funding_ids = set()
    """This query is to retrieve Tasks associated with funding records, which are not processed but are active"""
    tasks = (
        Task.select(
            Task,
            FundingRecord,
            FundingInvitee,
            User,
            UserInvitation.id.alias("invitation_id"),
            OrcidToken,
        )
        .where(
            FundingRecord.processed_at.is_null(),
            FundingInvitee.processed_at.is_null(),
            FundingRecord.is_active,
            # Either a token exists, or no "sent" invitation was recorded yet:
            (
                OrcidToken.id.is_null(False)
                | (
                    (FundingInvitee.status.is_null())
                    | (FundingInvitee.status.contains("sent").__invert__())
                )
            ),
        )
        .join(FundingRecord, on=(Task.id == FundingRecord.task_id), attr="record")
        .join(FundingInvitee, on=(FundingRecord.id == FundingInvitee.record_id), attr="invitee")
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == FundingInvitee.email)
                | ((User.orcid == FundingInvitee.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (UserInvitation.email == FundingInvitee.email)
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
        .limit(max_rows)
    )
    if record_id:
        tasks = tasks.where(FundingRecord.id == record_id)
    # Group rows by (task, org, record, resolved user); the inner lambda maps
    # an invitee with no matching User row (LEFT OUTER JOIN miss) to None.
    for (task_id, org_id, record_id, user), tasks_by_user in groupby(
        tasks,
        lambda t: (
            t.id,
            t.org_id,
            t.record.id,
            (lambda r: r.user if r.user.id else None)(t.record.invitee),
        ),
    ):
        """If we have the token associated to the user then update the funding record, otherwise send him an invite"""
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            )
            .exists()
        ):  # noqa: E127, E129
            # No usable token: invite each distinct
            # (inviter, org, invitee email/first name/last name) combination.
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.invitee.email,
                    t.record.invitee.first_name,
                    t.record.invitee.last_name,
                ),
            ):  # noqa: E501
                email = k[2]
                try:
                    send_user_invitation(*k, task_id=task_id)
                except Exception as ex:
                    # Record the failure on all still-pending invitee rows of this record:
                    (
                        FundingInvitee.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            FundingInvitee.email == email,
                            FundingInvitee.record_id == record_id,
                            FundingInvitee.processed_at.is_null(),
                        )
                    ).execute()
        else:
            create_or_update_funding(user, org_id, tasks_by_user)
        task_ids.add(task_id)
        funding_ids.add(record_id)
    for record in FundingRecord.select().where(FundingRecord.id << funding_ids):
        # The funding record is processed for all invitees
        if not (
            FundingInvitee.select()
            .where(FundingInvitee.record_id == record.id, FundingInvitee.processed_at.is_null())
            .exists()
        ):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("Funding record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (
            FundingRecord.select()
            .where(FundingRecord.task_id == task.id, FundingRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # "**" is peewee's case-insensitive LIKE (ILIKE) operator:
            error_count = (
                FundingRecord.select()
                .where(FundingRecord.task_id == task.id, FundingRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            with app.app_context():
                export_url = flask.url_for(
                    "fundingrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                send_email(
                    "email/funding_task_completed.html",
                    subject="Funding Process Update",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    filename=task.filename,
                )
@rq.job(timeout=300)
def process_affiliation_records(max_rows=20, record_id=None):
    """Process uploaded affiliation records.

    Retrieves active, not yet processed affiliation records (at most
    *max_rows*).  Users without an ORCID "/activities/update" access token
    receive an invitation e-mail (one per distinct invitee, carrying the set
    of affiliation types); for users with a token the affiliations are pushed
    to their ORCID profile.  Completed tasks get time-stamped and the task
    creator is e-mailed a summary (skipped for "INTEGRATION" test tasks).

    Fix: the failure-log message misspelled "completion" ("comletion"),
    inconsistent with the sibling record processors.

    Args:
        max_rows (int): The maximum number of rows processed in one go.
        record_id (int|list): Optional AffiliationRecord ID(s) to restrict the run to.
    """
    set_server_name()
    # TODO: optimize removing redundant fields
    # TODO: perhaps it should be broken into 2 queries
    task_ids = set()
    tasks = (
        Task.select(
            Task, AffiliationRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken
        )
        .where(
            AffiliationRecord.processed_at.is_null(),
            AffiliationRecord.is_active,
            # Either the user is fully linked (user + ORCID iD + token), or no
            # invitation exists yet and none was marked "sent":
            (
                (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False))
                | (
                    (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null())
                    & UserInvitation.id.is_null()
                    & (
                        AffiliationRecord.status.is_null()
                        | AffiliationRecord.status.contains("sent").__invert__()
                    )
                )
            ),
        )
        .join(AffiliationRecord, on=(Task.id == AffiliationRecord.task_id), attr="record")
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == AffiliationRecord.email)
                | ((User.orcid == AffiliationRecord.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (UserInvitation.email == AffiliationRecord.email)
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
        .limit(max_rows)
    )
    if record_id:
        if isinstance(record_id, list):
            tasks = tasks.where(AffiliationRecord.id.in_(record_id))
        else:
            tasks = tasks.where(AffiliationRecord.id == record_id)
    # Group rows by (task, org, resolved user); a missed LEFT OUTER JOIN on
    # User yields a stub row with a falsy id and is mapped to None:
    for (task_id, org_id, user), tasks_by_user in groupby(
        tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record))
    ):
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            )
            .exists()
        ):  # noqa: E127, E129
            # maps invitation attributes to affiliation type set:
            # - the user who uploaded the task;
            # - the user organisation;
            # - the invitee email;
            # - the invitee first_name;
            # - the invitee last_name
            invitation_dict = {
                k: set(t.record.affiliation_type.lower() for t in tasks)
                for k, tasks in groupby(
                    tasks_by_user,
                    lambda t: (
                        t.created_by,
                        t.org,
                        t.record.email,
                        t.record.first_name,
                        t.record.last_name,
                    ),  # noqa: E501
                )  # noqa: E501
            }
            for invitation, affiliations in invitation_dict.items():
                email = invitation[2]
                try:
                    send_user_invitation(
                        *invitation, affiliation_types=affiliations, task_id=task_id
                    )
                except Exception as ex:
                    # Mark all pending rows for this invitee as failed:
                    (
                        AffiliationRecord.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            AffiliationRecord.task_id == task_id,
                            AffiliationRecord.email == email,
                            AffiliationRecord.processed_at.is_null(),
                        )
                    ).execute()
        else:  # user exists and we have tokens
            create_or_update_affiliations(user, org_id, tasks_by_user)
        task_ids.add(task_id)
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (all records are processed):
        if not (
            AffiliationRecord.select()
            .where(AffiliationRecord.task_id == task.id, AffiliationRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # "**" is peewee's case-insensitive LIKE (ILIKE) operator:
            error_count = (
                AffiliationRecord.select()
                .where(AffiliationRecord.task_id == task.id, AffiliationRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            orcid_rec_count = (
                task.affiliation_records.select(AffiliationRecord.orcid).distinct().count()
            )
            if task.filename and "INTEGRATION" not in task.filename:
                with app.app_context():
                    export_url = flask.url_for(
                        "affiliationrecord.export",
                        export_type="csv",
                        _scheme="http" if EXTERNAL_SP else "https",
                        task_id=task.id,
                        _external=True,
                    )
                    try:
                        send_email(
                            "email/task_completed.html",
                            subject="Affiliation Process Update",
                            recipient=(task.created_by.name, task.created_by.email),
                            error_count=error_count,
                            row_count=row_count,
                            orcid_rec_count=orcid_rec_count,
                            export_url=export_url,
                            filename=task.filename,
                        )
                    except Exception:
                        logger.exception(
                            "Failed to send batch process completion notification message."
                        )
@rq.job(timeout=300)
def process_property_records(max_rows=20, record_id=None):
    """Process uploaded property records.

    Retrieves active, not yet processed researcher property records (at most
    *max_rows* when given).  Users without an ORCID "/person/update" access
    token are e-mailed an invitation and the record status is updated; for
    users with a token the properties are created/updated in their ORCID
    profile.  Completed tasks are time-stamped and a summary is e-mailed to
    the task creator.

    Args:
        max_rows (int): The maximum number of rows processed in one go.
        record_id (int|list): Optional PropertyRecord ID(s) to restrict the run to.
    """
    set_server_name()
    # TODO: optimize removing redundant fields
    # TODO: perhaps it should be broken into 2 queries
    task_ids = set()
    tasks = (
        Task.select(
            Task, PropertyRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken
        )
        .where(
            PropertyRecord.processed_at.is_null(),
            PropertyRecord.is_active,
            # Either the user is fully linked (user + ORCID iD + token), or no
            # invitation exists yet and none was marked "sent":
            (
                (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False))
                | (
                    (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null())
                    & UserInvitation.id.is_null()
                    & (
                        PropertyRecord.status.is_null()
                        | PropertyRecord.status.contains("sent").__invert__()
                    )
                )
            ),
        )
        .join(PropertyRecord, on=(Task.id == PropertyRecord.task_id), attr="record")
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == PropertyRecord.email)
                | ((User.orcid == PropertyRecord.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (
                    (UserInvitation.email == PropertyRecord.email)
                    | (UserInvitation.email == User.email)
                )
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/person/update"))
            ),
        )
    )
    if max_rows:
        tasks = tasks.limit(max_rows)
    if record_id:
        if isinstance(record_id, list):
            tasks = tasks.where(PropertyRecord.id.in_(record_id))
        else:
            tasks = tasks.where(PropertyRecord.id == record_id)
    # Group rows by (task, org, resolved user); a missed LEFT OUTER JOIN on
    # User yields a stub row with a falsy id and is mapped to None:
    for (task_id, org_id, user), tasks_by_user in groupby(
        tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record))
    ):
        if (
            not user
            or not user.orcid
            or not OrcidToken.select()
            .where(
                OrcidToken.user_id == user.id,
                OrcidToken.org_id == org_id,
                OrcidToken.scopes.contains("/person/update"),
            )
            .exists()
        ):  # noqa: E127, E129
            # No usable token: invite each distinct invitee once and record
            # the invitation in the status of all their rows.
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.email,
                    t.record.first_name,
                    t.record.last_name,
                    user,
                ),
            ):  # noqa: E501
                try:
                    send_user_invitation(*k, task_id=task_id)
                    status = "The invitation sent at " + datetime.utcnow().isoformat(
                        timespec="seconds"
                    )
                    for r in tasks:
                        r.record.add_status_line(status)
                        r.record.save()
                except Exception as ex:
                    for r in tasks:
                        r.record.add_status_line(f"Failed to send an invitation: {ex}.")
                        r.record.save()
        else:
            create_or_update_properties(user, org_id, tasks_by_user)
        task_ids.add(task_id)
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (all records are processed):
        if not (
            PropertyRecord.select()
            .where(PropertyRecord.task_id == task.id, PropertyRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # "**" is peewee's case-insensitive LIKE (ILIKE) operator:
            error_count = (
                PropertyRecord.select()
                .where(PropertyRecord.task_id == task.id, PropertyRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            with app.app_context():
                export_url = flask.url_for(
                    "propertyrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                try:
                    send_email(
                        "email/task_completed.html",
                        subject="Researcher Property Record Process Update",
                        recipient=(task.created_by.name, task.created_by.email),
                        error_count=error_count,
                        row_count=row_count,
                        export_url=export_url,
                        task_name="Researcher Property",
                        filename=task.filename,
                    )
                except Exception:
                    logger.exception(
                        "Failed to send batch process completion notification message."
                    )
@rq.job(timeout=300)
def process_other_id_records(max_rows=20, record_id=None):
    """Process uploaded Other ID records.

    Retrieves active, not yet processed external-identifier records (at most
    *max_rows*).  Users without an ORCID "/person/update" access token are
    e-mailed an invitation and the record status is updated; for users with a
    token the identifiers are pushed to their ORCID profile.  Completed tasks
    are time-stamped and a summary is e-mailed to the task creator.

    Args:
        max_rows (int): The maximum number of rows processed in one go.
        record_id (int): Optional OtherIdRecord ID to restrict the run to.
    """
    set_server_name()
    # TODO: optimize
    task_ids = set()
    tasks = (
        Task.select(
            Task, OtherIdRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken
        )
        .where(
            OtherIdRecord.processed_at.is_null(),
            OtherIdRecord.is_active,
            # Either the user is fully linked (user + ORCID iD + token), or no
            # invitation exists yet and none was marked "sent":
            (
                (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False))
                | (
                    (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null())
                    & UserInvitation.id.is_null()
                    & (
                        OtherIdRecord.status.is_null()
                        | OtherIdRecord.status.contains("sent").__invert__()
                    )
                )
            ),
        )
        .join(OtherIdRecord, on=(Task.id == OtherIdRecord.task_id), attr="record")
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == OtherIdRecord.email)
                | ((User.orcid == OtherIdRecord.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (UserInvitation.email == OtherIdRecord.email) & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/person/update"))
            ),
        )
        .limit(max_rows)
    )
    if record_id:
        tasks = tasks.where(OtherIdRecord.id == record_id)
    # Group rows by (task, org, resolved user); a missed LEFT OUTER JOIN on
    # User yields a stub row with a falsy id and is mapped to None:
    for (task_id, org_id, user), tasks_by_user in groupby(
        tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record))
    ):
        if (
            user is None
            or user.orcid is None
            or not OrcidToken.select()
            .where(
                (OrcidToken.user_id == user.id)
                & (OrcidToken.org_id == org_id)
                & (OrcidToken.scopes.contains("/person/update"))
            )
            .exists()
        ):  # noqa: E127, E129
            # No usable token: invite each distinct invitee once.
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.email,
                    t.record.first_name,
                    t.record.last_name,
                ),
            ):  # noqa: E501
                try:
                    email = k[2]
                    # NOTE(review): reuses the property invitation template —
                    # presumably intentional; confirm.
                    send_user_invitation(
                        *k, task_id=task_id, invitation_template="email/property_invitation.html"
                    )
                    status = "The invitation sent at " + datetime.utcnow().isoformat(
                        timespec="seconds"
                    )
                    # Append the status to rows that already have one …
                    (
                        OtherIdRecord.update(status=OtherIdRecord.status + "\n" + status)
                        .where(
                            OtherIdRecord.status.is_null(False),
                            OtherIdRecord.task_id == task_id,
                            OtherIdRecord.email == email,
                        )
                        .execute()
                    )
                    # … and set it on rows that don't:
                    (
                        OtherIdRecord.update(status=status)
                        .where(
                            OtherIdRecord.status.is_null(),
                            OtherIdRecord.task_id == task_id,
                            OtherIdRecord.email == email,
                        )
                        .execute()
                    )
                except Exception as ex:
                    # Mark all pending rows for this invitee as failed:
                    (
                        OtherIdRecord.update(
                            processed_at=datetime.utcnow(),
                            status=f"Failed to send an invitation: {ex}.",
                        ).where(
                            OtherIdRecord.task_id == task_id,
                            OtherIdRecord.email == email,
                            OtherIdRecord.processed_at.is_null(),
                        )
                    ).execute()
        else:
            create_or_update_other_id(user, org_id, tasks_by_user)
        task_ids.add(task_id)
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (all records are processed):
        if not (
            OtherIdRecord.select()
            .where(OtherIdRecord.task_id == task.id, OtherIdRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # "**" is peewee's case-insensitive LIKE (ILIKE) operator:
            error_count = (
                OtherIdRecord.select()
                .where(OtherIdRecord.task_id == task.id, OtherIdRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            with app.app_context():
                export_url = flask.url_for(
                    "otheridrecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                try:
                    send_email(
                        "email/task_completed.html",
                        subject="Other ID Record Process Update",
                        recipient=(task.created_by.name, task.created_by.email),
                        error_count=error_count,
                        row_count=row_count,
                        export_url=export_url,
                        task_name="Other ID",
                        filename=task.filename,
                    )
                except Exception:
                    logger.exception(
                        "Failed to send batch process completion notification message."
                    )
@rq.job(timeout=300)
def process_resource_records(max_rows=20, record_id=None):
    """Process uploaded resource records.

    Retrieves active, not yet processed research resource records (at most
    *max_rows* when given).  Users without an ORCID "/activities/update"
    access token are e-mailed an invitation and the record status is updated;
    for users with a token the resources are pushed to their ORCID profile.
    Completed tasks are time-stamped and a summary is e-mailed to the task
    creator.

    Fix: the completion e-mail subject misspelled "Resource" ("Rresource").

    Args:
        max_rows (int): The maximum number of rows processed in one go.
        record_id (int|list): Optional ResourceRecord ID(s) to restrict the run to.
    """
    set_server_name()
    # TODO: optimize removing redundant fields
    # TODO: perhaps it should be broken into 2 queries
    task_ids = set()
    tasks = (
        Task.select(
            Task, ResourceRecord, User, UserInvitation.id.alias("invitation_id"), OrcidToken
        )
        .where(
            ResourceRecord.processed_at.is_null(),
            ResourceRecord.is_active,
            # Either the user is fully linked (user + ORCID iD + token), or no
            # invitation exists yet and none was marked "sent":
            (
                (User.id.is_null(False) & User.orcid.is_null(False) & OrcidToken.id.is_null(False))
                | (
                    (User.id.is_null() | User.orcid.is_null() | OrcidToken.id.is_null())
                    & UserInvitation.id.is_null()
                    & (
                        ResourceRecord.status.is_null()
                        | ResourceRecord.status.contains("sent").__invert__()
                    )
                )
            ),
        )
        .join(ResourceRecord, on=(Task.id == ResourceRecord.task_id), attr="record")
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=(
                (User.email == ResourceRecord.email)
                | ((User.orcid == ResourceRecord.orcid) & (User.organisation_id == Task.org_id))
            ),
        )
        .join(Organisation, JOIN.LEFT_OUTER, on=(Organisation.id == Task.org_id))
        .join(
            UserOrg,
            JOIN.LEFT_OUTER,
            on=((UserOrg.user_id == User.id) & (UserOrg.org_id == Organisation.id)),
        )
        .join(
            UserInvitation,
            JOIN.LEFT_OUTER,
            on=(
                (
                    (UserInvitation.email == ResourceRecord.email)
                    | (UserInvitation.email == User.email)
                )
                & (UserInvitation.task_id == Task.id)
            ),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Organisation.id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
        )
    )
    if max_rows:
        tasks = tasks.limit(max_rows)
    if record_id:
        if isinstance(record_id, list):
            tasks = tasks.where(ResourceRecord.id.in_(record_id))
        else:
            tasks = tasks.where(ResourceRecord.id == record_id)
    # Group rows by (task, org, resolved user); a missed LEFT OUTER JOIN on
    # User yields a stub row with a falsy id and is mapped to None:
    for (task_id, org_id, user), tasks_by_user in groupby(
        tasks, lambda t: (t.id, t.org_id, (lambda r: r.user if r.user.id else None)(t.record))
    ):
        if (
            not user
            or not user.orcid
            or not OrcidToken.select()
            .where(
                OrcidToken.user_id == user.id,
                OrcidToken.org_id == org_id,
                OrcidToken.scopes.contains("/activities/update"),
            )
            .exists()
        ):  # noqa: E127, E129
            # No usable token: invite each distinct invitee once and record
            # the invitation in the status of all their rows.
            for k, tasks in groupby(
                tasks_by_user,
                lambda t: (
                    t.created_by,
                    t.org,
                    t.record.email,
                    t.record.first_name,
                    t.record.last_name,
                    user,
                ),
            ):  # noqa: E501
                try:
                    send_user_invitation(*k, task_id=task_id)
                    status = "The invitation sent at " + datetime.utcnow().isoformat(
                        timespec="seconds"
                    )
                    for r in tasks:
                        r.record.add_status_line(status)
                        r.record.save()
                except Exception as ex:
                    for r in tasks:
                        r.record.add_status_line(f"Failed to send an invitation: {ex}.")
                        r.record.save()
        else:
            create_or_update_resources(user, org_id, tasks_by_user)
        task_ids.add(task_id)
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (all records are processed):
        rm = task.record_model
        if not (rm.select().where(rm.task_id == task.id, rm.processed_at.is_null()).exists()):
            task.completed_at = datetime.utcnow()
            task.save()
            # "**" is peewee's case-insensitive LIKE (ILIKE) operator:
            error_count = rm.select().where(rm.task_id == task.id, rm.status ** "%error%").count()
            row_count = task.record_count
            with app.app_context():
                export_url = flask.url_for(
                    "resourcerecord.export",
                    export_type="json" if task.is_raw else "csv",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                try:
                    send_email(
                        "email/task_completed.html",
                        subject="Research Resource Record Process Update",
                        recipient=(task.created_by.name, task.created_by.email),
                        error_count=error_count,
                        row_count=row_count,
                        export_url=export_url,
                        task_name="Research Resource",
                        filename=task.filename,
                    )
                except Exception:
                    logger.exception(
                        "Failed to send batch process completion notification message."
                    )
@rq.job(timeout=300)
def process_message_records(max_rows=20, record_id=None):
    """Process uploaded ORCID message records.

    Handles "raw" message tasks: invitees without an
    "/activities/update" ORCID token get an invitation e-mail; invitees with
    a token get their records created/updated from the stored messages.
    Fully processed records and tasks are time-stamped, and a completion
    summary is e-mailed to the task creator.

    Args:
        max_rows (int): The maximum number of rows processed in one go.
        record_id (int|list|tuple): Optional MessageRecord ID(s) to restrict the run to.
    """
    RecordInvitee = MessageRecord.invitees.get_through_model()  # noqa: N806
    set_server_name()
    record_ids = set()
    task_ids = set()
    tasks = (
        Task.select(Task, MessageRecord, RecordInvitee, Invitee, User, OrcidToken)
        .where(Task.is_raw, Invitee.processed_at.is_null(), MessageRecord.is_active)
        .join(MessageRecord, attr="record")
        .join(RecordInvitee, attr="ri")
        .join(Invitee)
        .join(
            User,
            JOIN.LEFT_OUTER,
            on=((User.email == Invitee.email) | ((User.orcid == Invitee.orcid))),
        )
        .join(
            OrcidToken,
            JOIN.LEFT_OUTER,
            on=(
                (OrcidToken.user_id == User.id)
                & (OrcidToken.org_id == Task.org_id)
                & (OrcidToken.scopes.contains("/activities/update"))
            ),
            attr="token",
        )
    )
    if record_id:
        if isinstance(record_id, (list, tuple)):
            tasks = tasks.where(MessageRecord.id << record_id)
        else:
            tasks = tasks.where(MessageRecord.id == record_id)
    if max_rows:
        tasks = tasks.limit(max_rows)
    # Send an invitation
    for task_id, records in groupby(
        tasks.where(OrcidToken.id.is_null()).order_by(
            Task.id, Invitee.email, Invitee.first_name, Invitee.last_name
        ),
        lambda t: t.id,
    ):
        task_ids.add(task_id)
        # De-duplicate invitees, falling back to the linked user's details
        # when the invitee row lacks them:
        invitees = set(
            (
                t.created_by,
                t.org,
                t.record.ri.invitee.email or t.record.ri.invitee.user.email,
                t.record.ri.invitee.first_name or t.record.ri.invitee.user.first_name,
                t.record.ri.invitee.last_name or t.record.ri.invitee.user.last_name,
            )
            for t in records
        )
        for invitee in invitees:  # noqa: E501
            send_user_invitation(*invitee, task_id=task_id)
    # Create or update the resource record
    for (task_id, task_type, org_id, user), records in groupby(
        tasks.where(OrcidToken.id.is_null(False)).order_by(Task.id, User.id, MessageRecord.id),
        lambda t: (t.id, t.task_type, t.org_id, t.record.ri.invitee.user),
    ):
        # TODO: in the future - implememnt for other types:
        task_ids.add(task_id)
        records = list(records)
        create_or_update_record_from_messages(records)
        record_ids.update(r.record.id for r in records)
    for record in MessageRecord.select().where(MessageRecord.id << record_ids):
        # The Work record is processed for all invitees
        if not (
            RecordInvitee.select()
            .join(Invitee)
            .where(RecordInvitee.messagerecord_id == record.id, Invitee.processed_at.is_null())
            .exists()
        ):
            record.processed_at = datetime.utcnow()
            if not record.status or "error" not in record.status:
                record.add_status_line("record is processed.")
            record.save()
    for task in Task.select().where(Task.id << task_ids):
        # The task is completed (Once all records are processed):
        if not (
            MessageRecord.select()
            .where(MessageRecord.task_id == task.id, MessageRecord.processed_at.is_null())
            .exists()
        ):
            task.completed_at = datetime.utcnow()
            task.save()
            # "**" is peewee's case-insensitive LIKE (ILIKE) operator:
            error_count = (
                MessageRecord.select()
                .where(MessageRecord.task_id == task.id, MessageRecord.status ** "%error%")
                .count()
            )
            row_count = task.record_count
            with app.app_context():
                export_url = flask.url_for(
                    "messagerecord.export",
                    export_type="json",
                    _scheme="http" if EXTERNAL_SP else "https",
                    task_id=task.id,
                    _external=True,
                )
                send_email(
                    "email/task_completed.html",
                    subject=f"Batch Process Update: {task.filename}",
                    recipient=(task.created_by.name, task.created_by.email),
                    error_count=error_count,
                    row_count=row_count,
                    export_url=export_url,
                    filename=task.filename,
                )
@rq.job(timeout=300)
def process_tasks(max_rows=20):
    """Handle batch task expiration.

    Deletes tasks past their expiry date, assigns an expiry date to tasks
    lacking one (the later of created_at + 4 weeks and updated_at + 2 weeks),
    and sends a warning e-mail to the owner of each task expiring within a
    week.

    Fix: the expiry-warning cut-off used naive local time via
    ``datetime.now()`` while every other timestamp here (deletion cut-off,
    ``expiry_email_sent_at``) uses ``datetime.utcnow()``; on a non-UTC server
    the warning window was shifted by the UTC offset.

    Args:
        max_rows (int): The maximum number of rows that will get processed in one go.
    """
    # Remove tasks that have already expired:
    Task.delete().where((Task.expires_at < datetime.utcnow())).execute()
    # Back-fill the expiry date for tasks that don't have one yet:
    tasks = Task.select().where(Task.expires_at.is_null())
    if max_rows and max_rows > 0:
        tasks = tasks.limit(max_rows)
    for task in tasks:
        max_created_at_expiry = task.created_at + timedelta(weeks=4)
        max_updated_at_expiry = task.updated_at + timedelta(weeks=2)
        max_expiry_date = max_created_at_expiry
        if max_created_at_expiry < max_updated_at_expiry:
            max_expiry_date = max_updated_at_expiry
        task.expires_at = max_expiry_date
        task.save()
    # Warn the owners of tasks expiring within a week (once per task):
    tasks = Task.select().where(
        Task.expires_at.is_null(False),
        Task.expiry_email_sent_at.is_null(),
        Task.expires_at < (datetime.utcnow() + timedelta(weeks=1)),
    )
    if max_rows and max_rows > 0:
        tasks = tasks.limit(max_rows)
    for task in tasks:
        if not task.task_type or task.records is None:
            app.logger.error(f'Unknown task "{task}" (ID: {task.id}) task type.')
            continue
        # Skip integration-test artefacts:
        if task.filename and "INTEGRATION" in task.filename:
            continue
        export_model = task.record_model._meta.name + ".export"
        error_count = task.error_count
        set_server_name()
        with app.app_context():
            export_url = flask.url_for(
                export_model,
                export_type="csv",
                _scheme="http" if EXTERNAL_SP else "https",
                task_id=task.id,
                _external=True,
            )
            send_email(
                "email/task_expiration.html",
                task=task,
                subject="Batch process task is about to expire",
                recipient=(task.created_by.name, task.created_by.email),
                error_count=error_count,
                export_url=export_url,
            )
        task.expiry_email_sent_at = datetime.utcnow()
        task.save()
def get_client_credentials_token(org, scopes="/webhook"):
    """Request a client credentials grant type access token and store it.

    Any tokens previously requested with the given scope are deleted before
    the new token is saved.

    Args:
        org (Organisation): Organisation whose ORCID client credentials are used.
        scopes (str): The requested token scope (default: "/webhook").

    Returns:
        OrcidToken: The newly created token record.
    """
    resp = requests.post(
        app.config["TOKEN_URL"],
        headers={"Accept": "application/json"},
        data=dict(
            client_id=org.orcid_client_id,
            client_secret=org.orcid_secret,
            scope=scopes,
            grant_type="client_credentials",
        ),
    )
    # Replace any previously issued token of the same scope:
    OrcidToken.delete().where(OrcidToken.org == org, OrcidToken.scopes == scopes).execute()
    data = resp.json()
    token = OrcidToken.create(
        org=org,
        access_token=data["access_token"],
        refresh_token=data["refresh_token"],
        scopes=data.get("scope") or scopes,
        expires_in=data["expires_in"],
    )
    return token
@rq.job(timeout=300)
def register_orcid_webhook(user, callback_url=None, delete=False):
    """Register or delete an ORCID webhook for the given user profile update events.

    If *callback_url* is given, it is used as the call-back URL; otherwise a
    local ``update_webhook`` handler URL is generated.  The ORCID API call is
    recorded in OrcidApiCall, and for local handlers the user's
    ``webhook_enabled`` flag is kept in sync with the outcome.

    Raises:
        ApiException: When the ORCID API returns anything other than 201/204.
    """
    local_handler = callback_url is None
    # Don't delete the webhook if there is another organisation with enabled webhook:
    if (
        local_handler
        and delete
        and user.organisations.where(Organisation.webhook_enabled).count() > 1
    ):
        return
    # Any 'webhook' access token can be used:
    token = (
        OrcidToken.select()
        .where(OrcidToken.org == user.organisation, OrcidToken.scopes == "/webhook")
        .order_by(OrcidToken.id.desc())
        .first()
    )
    if not token:
        token = get_client_credentials_token(org=user.organisation, scopes="/webhook")
    if local_handler:
        set_server_name()
        with app.app_context():
            # The call-back URL is percent-encoded because it becomes a single
            # path segment of the ORCID API URL below:
            callback_url = quote(
                url_for("update_webhook", user_id=user.id, _external=True, _scheme="https"),
                safe="",
            )
    elif "/" in callback_url or ":" in callback_url:
        callback_url = quote(callback_url, safe="")
    url = f"{app.config['ORCID_API_HOST_URL']}{user.orcid}/webhook/{callback_url}"
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {token.access_token}",
        "Content-Length": "0",
    }
    # Log the outgoing API call and its outcome:
    call = OrcidApiCall(method="DELETE" if delete else "PUT", url=url, query_params=headers)
    resp = requests.delete(url, headers=headers) if delete else requests.put(url, headers=headers)
    call.response = resp.text
    call.status = resp.status_code
    call.set_response_time()
    call.save()
    if local_handler:
        # 201 == created, 204 == deleted/no content:
        user.webhook_enabled = (resp.status_code in [201, 204]) and not delete
        user.save()
    if resp.status_code not in [201, 204]:
        raise ApiException(f"Failed to register or delete webhook {callback_url}: {resp.text}")
    return resp
def notify_about_update(user, event_type="UPDATED"):
    """Notify all organisation about changes of the user.

    For each of the user's organisations with notifications configured:
    queues a webhook invocation when the org has a webhook URL enabled, and
    sends a notification e-mail when e-mail notifications are enabled.

    Args:
        user (User): The user whose ORCID profile changed.
        event_type (str): "UPDATED" for a profile update; any other value is
            rendered as an account-linking event in the e-mail.
    """
    for org in user.organisations.where(
        Organisation.webhook_enabled | Organisation.email_notifications_enabled
    ):
        if org.webhook_enabled and org.webhook_url:
            invoke_webhook_handler.queue(
                org.id,
                user.orcid,
                user.created_at or user.updated_at,
                user.updated_at or user.created_at,
                event_type=event_type,
            )
        if org.email_notifications_enabled:
            url = app.config["ORCID_BASE_URL"] + user.orcid
            # Send to the org's notification address if set (cc'ing the tech
            # contact), otherwise directly to the tech contact:
            send_email(
                f"""<p>User {user.name} (<a href="{url}" target="_blank">{user.orcid}</a>)
                {"profile was updated" if event_type == "UPDATED" else "has linked their account"} at
                {(user.updated_at or user.created_at).isoformat(timespec="minutes", sep=' ')}.</p>""",
                recipient=org.notification_email
                or (org.tech_contact.name, org.tech_contact.email),
                cc_email=(org.tech_contact.name, org.tech_contact.email)
                if org.notification_email
                else None,
                subject=f"ORCID Profile Update ({user.orcid})",
                org=org,
            )
@rq.job(timeout=300)
def invoke_webhook_handler(
    org_id=None,
    orcid=None,
    created_at=None,
    updated_at=None,
    message=None,
    event_type="UPDATED",
    attempts=5,
    *args,
    **kwargs,
):
    """Propagate 'updated' event to the organisation event handler URL.

    Builds the event payload (unless *message* is given), POSTs it to the
    organisation's webhook URL, and on failure re-schedules itself with an
    increasing delay until *attempts* runs out.

    Fix: the POST was wrapped in a bare ``except:`` that left ``resp``
    unbound, so a connection failure with attempts > 1 crashed with a
    NameError at the ``if not resp`` check instead of scheduling a retry.
    ``resp`` is now pre-initialized and only ``Exception`` is caught.
    """
    if not message:
        url = app.config["ORCID_BASE_URL"] + orcid
        message = {"orcid": orcid, "url": url, "type": event_type}
        if event_type == "CREATED" and created_at:
            message["created-at"] = created_at.isoformat(timespec="seconds")
        if updated_at:
            message["updated-at"] = updated_at.isoformat(timespec="seconds")
        if orcid:
            # The most recently created user with this ORCID iD:
            user = (
                User.select().where(User.orcid == orcid).order_by(User.id.desc()).limit(1).first()
            )
            if user:
                message["email"] = user.email
                if user.eppn:
                    message["eppn"] = user.eppn
    if org_id:
        org = Organisation.get(id=org_id)
    else:
        org = User.select().where(User.orcid == orcid).first().organisation
        org_id = org.id
    url = org.webhook_url
    if org.webhook_append_orcid:
        if not url.endswith("/"):
            url += "/"
        url += orcid
    resp = None  # stays None when the POST itself fails, triggering a retry
    try:
        app.logger.info(f"Invoking webhook: {url} with payload: {message}")
        if org.webhook_apikey:
            resp = requests.post(url, json=message, headers=dict(apikey=org.webhook_apikey))
        else:
            resp = requests.post(url, json=message)
    except Exception:
        if attempts == 1:
            raise
    # A 2xx/3xx status counts as delivered (status_code // 200 == 1):
    if not resp or resp.status_code // 200 != 1:
        if attempts > 1:
            # Back off: 5 min for the first retries, then growing by 5 min:
            invoke_webhook_handler.schedule(
                timedelta(minutes=5 * (6 - attempts) if attempts < 6 else 5),
                org_id=org_id,
                message=message,
                attempts=attempts - 1,
            )
        else:
            raise Exception(f"Failed to propagate the event. Status code: {resp.status_code}")
    return resp
@rq.job(timeout=300)
def enable_org_webhook(org):
    """Enable Organisation Webhook."""
    org.webhook_enabled = True
    org.save()
    # Register the webhook for every org user that has an ORCID iD
    # but no webhook registered yet:
    candidates = org.users.where(
        User.webhook_enabled.NOT(), User.orcid.is_null(False) | (User.orcid != "")
    )
    for user in candidates:
        if not user.orcid.strip():
            continue
        register_orcid_webhook.queue(user)
@rq.job(timeout=300)
def disable_org_webhook(org):
    """Disable Organisation Webhook."""
    org.webhook_enabled = False
    org.save()
    # De-register the webhook for every org user that currently has one:
    candidates = org.users.where(
        User.webhook_enabled, User.orcid.is_null(False) | (User.orcid != "")
    )
    for user in candidates:
        if not user.orcid.strip():
            continue
        register_orcid_webhook.queue(user, delete=True)
def process_records(n):
    """Process first n records and run other batch tasks."""
    # Run each batch processor in turn over at most n rows:
    batch_jobs = (
        process_affiliation_records,
        process_funding_records,
        process_work_records,
        process_peer_review_records,
        process_property_records,
        process_other_id_records,
    )
    for job in batch_jobs:
        job(n)
    # process_tasks(n)
@rq.job(timeout=300)
def send_orcid_update_summary(org_id=None):
    """Send organisation researcher ORCID profile update summary report.

    Without *org_id*, fans out: queues one job per organisation that has
    webhooks and e-mail notifications enabled and had user profiles updated
    during the previous calendar month.  With *org_id*, e-mails that
    organisation a list of its users whose profiles were updated last month.

    Fix: the e-mail template misspelled "following" ("flollowing").

    Args:
        org_id (int): Optional Organisation ID; when None the job fans out.
    """
    # Boundaries of the previous calendar month:
    first = date.today().replace(day=1)
    previous_last = first - timedelta(days=1)
    previous_first = previous_last.replace(day=1)
    if org_id is None:
        for o in (
            Organisation.select(Organisation.id)
            .distinct()
            .join(UserOrg, on=UserOrg.org_id == Organisation.id)
            .join(User, on=User.id == UserOrg.user_id)
            .where(Organisation.webhook_enabled, Organisation.email_notifications_enabled)
            .where(User.orcid_updated_at >= previous_first, User.orcid_updated_at < first)
        ):
            send_orcid_update_summary.queue(o.id)
        return
    org = Organisation.select().where(Organisation.id == org_id).first()
    if org and org.webhook_enabled and org.email_notifications_enabled:
        updated_users = org.users.where(
            User.orcid_updated_at >= previous_first, User.orcid_updated_at < first
        )
        recipient = org.notification_email or (org.tech_contact.name, org.tech_contact.email)
        if updated_users.exists():
            message_template = """<p>The following user profiles were updated
            from {{date_from}} until {{date_to}}:</p>
            <ul>
            {% for u in updated_users %}
                <li>{{u.name}} ({{u.email}},
                    <a href="{{orcid_base_url}}{{u.orcid}}" target="_blank">{{u.orcid}}</a>,
                    updated at {{u.orcid_updated_at.isoformat(sep=" ", timespec="seconds")}});
                </li>
            {% endfor %}
            </ul>
            """
            set_server_name()
            with app.app_context():
                send_email(
                    message_template,
                    org=org,
                    recipient=recipient,
                    subject="Updated ORCID Profiles",
                    date_from=previous_first,
                    date_to=previous_last,
                    updated_users=updated_users,
                    orcid_base_url=app.config["ORCID_BASE_URL"],
                )
@rq.job(timeout=300)
def sync_profile(task_id, delay=0.1):
    """Verify and sync the user profile.

    For every user of the task's organisation that has an ORCID iD and an
    "/activities/update" access token, synchronizes the ORCID profile via
    the member API, logging progress per user to the task log.

    Args:
        task_id (int): The sync task ID; the job is a no-op when the ID is
            falsy, unknown, or the org has no disambiguated ID.
        delay (float): Pause in seconds between users to throttle API calls.
    """
    if not task_id:
        return
    try:
        task = Task.get(task_id)
    except Task.DoesNotExist:
        return
    org = task.org
    # Profile sync needs a disambiguated organisation identifier:
    if not org.disambiguated_id:
        return
    api = orcid_client.MemberAPIV3(org=org)
    count = 0
    for u in (
        task.org.users.select(User, OrcidToken.access_token.alias("access_token"))
        .where(User.orcid.is_null(False))
        .join(
            OrcidToken,
            on=(
                (OrcidToken.user_id == User.id) & OrcidToken.scopes.contains("/activities/update")
            ),
        )
        .objects()
    ):
        Log.create(task=task_id, message=f"Processing user {u} / {u.orcid} profile.")
        api.sync_profile(user=u, access_token=u.access_token, task=task)
        count += 1
        time.sleep(delay)
    Log.create(task=task_id, message=f"In total, {count} user profiles were synchronized.")
class SafeRepresenterWithISODate(SafeRepresenter):
    """YAML representer that renders datetime values in ISO-8601 format."""
    def represent_datetime(self, data):
        """Represent *data* as a YAML timestamp scalar in ISO format (seconds precision)."""
        return self.represent_scalar(
            "tag:yaml.org,2002:timestamp", data.isoformat(timespec="seconds")
        )
def dump_yaml(data):
    """Dump the objects into YAML representation.

    datetimes are emitted as ISO-8601 timestamps and defaultdicts as plain
    YAML mappings.
    """
    # Representer registration is module-global and idempotent, so repeating
    # it on every call is harmless.
    # NOTE(review): the datetime representer is registered on `Dumper`, while
    # yaml.dump() below uses the default dumper — this only works if `Dumper`
    # is yaml's default Dumper; confirm against the import at the top of the file.
    yaml.add_representer(datetime, SafeRepresenterWithISODate.represent_datetime, Dumper=Dumper)
    yaml.add_representer(defaultdict, SafeRepresenter.represent_dict)
    return yaml.dump(data, allow_unicode=True)
def enqueue_user_records(user):
    """Enqueue all active and not yet processed record related to the user.

    Scans every incomplete, non-SYNC task and queues its pending records that
    are addressed to *user*.  A record (or its invitee) matches when its email
    and ORCID iD equal the user's, with NULL acting as a wildcard.
    """
    for task in list(
        Task.select().where(Task.completed_at.is_null(), Task.task_type != TaskType.SYNC)
    ):
        # Handler resolved by naming convention, e.g. process_funding_records.
        func = globals().get(f"process_{task.task_type.name.lower()}_records")
        records = task.records.where(
            task.record_model.is_active, task.record_model.processed_at.is_null()
        )
        # Record types with invitee rows match the user against the invitees;
        # all other types match against the record's own email/ORCID columns.
        if task.task_type == TaskType.FUNDING:
            records = records.join(FundingInvitee).where(
                (FundingInvitee.email.is_null() | (FundingInvitee.email == user.email)),
                (FundingInvitee.orcid.is_null() | (FundingInvitee.orcid == user.orcid)),
            )
        elif task.task_type == TaskType.PEER_REVIEW:
            records = records.join(PeerReviewInvitee).where(
                (PeerReviewInvitee.email.is_null() | (PeerReviewInvitee.email == user.email)),
                (PeerReviewInvitee.orcid.is_null() | (PeerReviewInvitee.orcid == user.orcid)),
            )
        elif task.task_type == TaskType.WORK:
            records = records.join(WorkInvitee).where(
                (WorkInvitee.email.is_null() | (WorkInvitee.email == user.email)),
                (WorkInvitee.orcid.is_null() | (WorkInvitee.orcid == user.orcid)),
            )
        elif task.task_type == TaskType.RESOURCE and task.is_raw:
            # Raw resource tasks keep invitees in a shared table reached via
            # the RecordInvitee link model.
            invitee_model = task.record_model.invitees.rel_model
            records = (
                records.join(RecordInvitee)
                .join(Invitee)
                .where(
                    (invitee_model.email.is_null() | (invitee_model.email == user.email)),
                    (invitee_model.orcid.is_null() | (invitee_model.orcid == user.orcid)),
                )
            )
        else:
            records = records.where(
                (task.record_model.email.is_null() | (task.record_model.email == user.email)),
                (task.record_model.orcid.is_null() | (task.record_model.orcid == user.orcid)),
            )
        record_ids = [r.id for r in records]
        if record_ids:
            # Affiliation records are processed in one batched job; every
            # other type gets one job per record.
            if task.task_type == TaskType.AFFILIATION:
                func.queue(record_id=record_ids)
            else:
                for record_id in record_ids:
                    func.queue(record_id=record_id)
def enqueue_task_records(task):
    """Enqueue every active, still unprocessed record of *task* for processing."""
    model = task.record_model
    pending = task.records.where(model.is_active, model.processed_at.is_null())
    # Raw tasks are handled wholesale by the generic message processor.
    if task.is_raw:
        return process_message_records.queue(record_id=[rec.id for rec in pending])
    # Type-specific handler resolved by naming convention,
    # e.g. process_affiliation_records.
    handler = globals().get(f"process_{task.task_type.name.lower()}_records")
    if task.task_type not in [TaskType.AFFILIATION, TaskType.PROPERTY]:
        for rec in pending:
            handler.queue(record_id=rec.id)
        return
    # Affiliation/property records addressed to the same person are batched
    # into one job; groupby() requires rows pre-sorted by the grouping key.
    pending = pending.order_by(model.email, model.orcid)
    for _, batch in groupby(pending, lambda rec: (rec.email, rec.orcid)):
        handler.queue(record_id=[rec.id for rec in batch])
def activate_all_records(task):
    """Activate all submitted task records and enqueue it for processing.

    Marks every not-yet-active record of *task* active in one bulk UPDATE,
    flips the task status to "ACTIVE" and enqueues the records.  Returns the
    number of records that were activated.
    """
    with db.atomic():
        try:
            status = "The record was activated at " + datetime.now().isoformat(timespec="seconds")
            # Bulk-activate records that are inactive or have no flag yet.
            count = (
                task.record_model.update(is_active=True, status=status)
                .where(
                    task.record_model.task == task,
                    (
                        task.record_model.is_active.is_null()
                        | (task.record_model.is_active == False)  # noqa: E712
                    ),
                )
                .execute()
            )  # noqa: E712
            task.status = "ACTIVE"
            task.save()
            enqueue_task_records(task)
        except:
            # Undo partial work and re-raise so the caller sees the failure.
            db.rollback()
            app.logger.exception("Failed to activate the selected records")
            raise
    return count
def reset_all_records(task):
    """Batch reset of batch records.

    Clears the processed/status markers of every active record of *task* (and
    of the records' invitees where present), wipes the task's user invitations,
    re-enqueues the records and reopens the task with status "RESET".  Returns
    the number of records that were reset.
    """
    count = 0
    with db.atomic():
        try:
            status = "The record was reset at " + datetime.now().isoformat(timespec="seconds")
            tt = task.task_type
            if tt in [TaskType.AFFILIATION, TaskType.PROPERTY, TaskType.OTHER_ID]:
                # Simple record types: reset everything in one bulk UPDATE.
                count = (
                    task.record_model.update(processed_at=None, status=status)
                    .where(
                        task.record_model.task_id == task.id,
                        task.record_model.is_active == True,  # noqa: E712
                    )
                    .execute()
                )  # noqa: E712
            else:
                # Record types with invitees: reset row by row so the related
                # invitee rows can be reset alongside each record.
                for record in task.records.where(
                    task.record_model.is_active == True  # noqa: E712
                ):  # noqa: E712
                    record.processed_at = None
                    record.status = status
                    if hasattr(record, "invitees"):
                        invitee_class = record.invitees.model
                        # Raw tasks link invitees indirectly, so match the
                        # invitee IDs; otherwise match the record foreign key.
                        invitee_class.update(processed_at=None, status=status).where(
                            (invitee_class.id << [i.id for i in record.invitees])
                            if task.is_raw
                            else (invitee_class.record == record.id)
                        ).execute()
                    record.save()
                    count = count + 1
            UserInvitation.delete().where(UserInvitation.task == task).execute()
            enqueue_task_records(task)
        except:
            # Undo partial work and re-raise so the caller sees the failure.
            db.rollback()
            app.logger.exception("Failed to reset the selected records")
            raise
        else:
            # Only reached on success: reopen the task.
            task.expires_at = None
            task.expiry_email_sent_at = None
            task.completed_at = None
            task.status = "RESET"
            task.save()
    return count
def plural(word):
    """Convert a regular noun to its regular plural form.

    Heuristic, suffix-based pluralization; raises IndexError on an empty
    string (unchanged from the historical behaviour).
    """
    # Ordered suffix rules: (suffix, number of chars to drop, ending to add).
    # Order matters: "fe" must be tried before "f" and "ion" before "on"
    # (so e.g. "region" -> "regions" while "salmon"-style "-on" words get "-a").
    suffix_rules = (
        ("fe", 2, "ves"),   # knife -> knives
        ("f", 1, "ves"),    # wolf -> wolves
        ("o", 0, "es"),     # potato -> potatoes
        ("us", 2, "i"),     # cactus -> cacti
        ("ion", 0, "s"),    # region -> regions
        ("on", 2, "a"),     # -on words that do not end in -ion
        ("y", 1, "ies"),    # community -> communities
    )
    for suffix, drop, ending in suffix_rules:
        if word.endswith(suffix):
            return (word[:-drop] if drop else word) + ending
    if word[-1] in "sx" or word[-2:] in ("sh", "ch"):
        return word + "es"  # box -> boxes, dish -> dishes
    if word.endswith("an"):
        return word[:-2] + "en"  # woman -> women
    return word + "s"
| 210 | 0 | 27 |
8753108b880e316f5ac29a3dc2a15a1900dfbfe8 | 374 | py | Python | modulo-2/exercicios/biblioteca_fatorial.py | giselemanuel/programa-Ifood-backend | d12544c30e2a26f7e2e2cd85df38a3f2c8860fe7 | [
"MIT"
] | 3 | 2021-04-25T23:31:13.000Z | 2021-04-26T16:59:12.000Z | modulo-2/exercicios/biblioteca_fatorial.py | giselemanuel/programa-Ifood-backend | d12544c30e2a26f7e2e2cd85df38a3f2c8860fe7 | [
"MIT"
] | null | null | null | modulo-2/exercicios/biblioteca_fatorial.py | giselemanuel/programa-Ifood-backend | d12544c30e2a26f7e2e2cd85df38a3f2c8860fe7 | [
"MIT"
] | null | null | null | # importa a biblioteca factorial existente em math
# Use the stdlib factorial implementation from math.
from math import factorial
# Program header banner (title centred between two rules).
print('-' * 30)
print(f'{"Calcula Fatorial":^30}')
print('-' * 30)
# Ask the user for a number.
num = int(input('Digite um número: '))
# Compute the factorial of the entered number.
fatorial = factorial(num)
# Print the factorial.
print(f'O fatorial de {num} é {fatorial}.')
| 22 | 50 | 0.713904 | # importa a biblioteca factorial existente em math
from math import factorial
# cabeçalho do programa
print('-' * 30)
print(f'{"Calcula Fatorial":^30}')
print('-' * 30)
# solicita o número ao usuário
num = int(input('Digite um número: '))
# calcula o fatorial do número inserido
fatorial = factorial(num)
# imprime o fatorial
print(f'O fatorial de {num} é {fatorial}.')
| 0 | 0 | 0 |
c99a6aad64d4bc1dcae50dcacbb687762ade5158 | 279 | py | Python | packages/galapagos/scripts/run_traffic_light.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | 2 | 2020-10-26T05:01:35.000Z | 2022-02-14T10:37:17.000Z | packages/galapagos/scripts/run_traffic_light.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | null | null | null | packages/galapagos/scripts/run_traffic_light.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import rospy
from sensor_msgs.msg import CompressedImage
import processor
from constants import PATH_USBCAM
# Module-level flags/state (read by the processing callbacks).
IS_DEBUG_MODE = True
CURRENT_STATE = 'traffic_light'
# Route each compressed USB-camera frame to the front-image processor;
# queue_size=1 keeps only the most recent frame so processing never lags.
rospy.Subscriber(PATH_USBCAM, CompressedImage, processor.process_front_image, queue_size=1)
| 23.25 | 91 | 0.83871 | #!/usr/bin/env python3
import rospy
from sensor_msgs.msg import CompressedImage
import processor
from constants import PATH_USBCAM
IS_DEBUG_MODE = True
CURRENT_STATE = 'traffic_light'
rospy.Subscriber(PATH_USBCAM, CompressedImage, processor.process_front_image, queue_size=1)
| 0 | 0 | 0 |
009674357f80908e1762b2aa1b8ef5892feb9fe5 | 5,739 | py | Python | globomap_driver_sample/driver.py | globocom/globomap-driver-sample | 7f49e4cf623da2075e94906142a3a82d3977fab9 | [
"Apache-2.0"
] | null | null | null | globomap_driver_sample/driver.py | globocom/globomap-driver-sample | 7f49e4cf623da2075e94906142a3a82d3977fab9 | [
"Apache-2.0"
] | null | null | null | globomap_driver_sample/driver.py | globocom/globomap-driver-sample | 7f49e4cf623da2075e94906142a3a82d3977fab9 | [
"Apache-2.0"
] | 1 | 2019-10-30T12:28:13.000Z | 2019-10-30T12:28:13.000Z | """
Copyright 2017 Globo.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import logging
from time import time
from requests import Session
from globomap_driver_sample.util import timed_logging
from globomap_driver_sample.settings import SSL_VERIFY
logger = logging.getLogger(__name__)
| 31.190217 | 91 | 0.555323 | """
Copyright 2017 Globo.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import logging
from time import time
from requests import Session
from globomap_driver_sample.util import timed_logging
from globomap_driver_sample.settings import SSL_VERIFY
logger = logging.getLogger(__name__)
class Driver(object):
    """Sample GloboMap driver.

    Collects data (``get_data``), shapes it into GloboMap loader payloads
    (``treat_data``) and provides a thin HTTP helper (``_make_request``).
    """
    def __init__(self):
        """Create the driver with one reusable HTTP session."""
        self.session = Session()
    def __repr__(self):
        return "globomap_driver_sample"
    @timed_logging
    def get_data(self):
        """Return sample data as a list of ``(name, properties)`` tuples.

        The ``timed_logging`` decorator measures how long the call took and
        logs the method call; methods that fetch data via clients instead of
        ``_make_request`` must use it.
        """
        data = []
        for _ in range(1000):
            # Timestamp-based name keeps the generated entries distinct.
            name = f"data_{str(time())}"
            data.append((name, {"Key1": "Value1", "Key2": "Value2"}))
        return data
    def treat_data(self, data):
        """Convert ``(name, properties)`` pairs into loader update messages."""
        to_send = []
        for name, properties in data:
            # Human-readable descriptions for the known property keys.
            # (The original also bound a stray ``properties_metadata`` local
            # via a chained assignment; that dead name has been dropped.)
            metadata = {
                "Key1": "First Key",
                "Key2": "Second Key"
            }
            doc = self._make_document(name)
            _id = hashlib.md5(bytes(str(name).lower(), 'utf-8')).hexdigest()
            doc = self._add_properties(doc, properties=properties,
                                       properties_metadata=metadata)
            key = 'sample_{}'.format(_id)
            to_send.append(self._make_data(
                'sample_collection', 'collections', 'UPDATE', key, doc)
            )
        return to_send
    # PREPARE PAYLOAD
    def _make_data(self, collection, kind, action, key, element=''):
        """Wrap *element* in the envelope expected by the GloboMap loader."""
        return {
            'collection': collection,
            'type': kind,
            'action': action,
            'element': element,
            'key': key
        }
    def _make_document(self, data):
        """Build a collection document; its ID is the md5 of the lowercased name."""
        _id = hashlib.md5(bytes(str(data).lower(), 'utf-8')).hexdigest()
        return {
            'id': _id,
            'name': data,
            'provider': 'sample',
            'timestamp': int(time())
        }
    def _make_edge(self, origin, destiny):
        """Build an edge document linking *origin* to *destiny*."""
        _id = hashlib.md5(bytes((str(origin) + str(destiny)).lower(), 'utf-8')).hexdigest()
        _from = hashlib.md5(bytes(str(origin).lower(), 'utf-8')).hexdigest()
        _to = hashlib.md5(bytes(str(destiny).lower(), 'utf-8')).hexdigest()
        name = '{} - {}'.format(str(origin), str(destiny))
        return {
            'id': _id,
            'name': name,
            'provider': 'sample',
            'timestamp': int(time()),
            'from': 'origin_collection/sample_{}'.format(_from),
            'to': 'destiny_collection/sample_{}'.format(_to)
        }
    def _add_properties(self, document, properties=None, properties_metadata=None):
        """Attach ``properties`` plus per-key metadata to *document*.

        ``properties`` is a ``{'key': 'value'}`` dict and ``properties_metadata``
        a ``{'key': 'Key Name'}`` dict; keys missing from the metadata fall
        back to their own name as description.  (The mutable ``{}`` default
        has been replaced by ``None`` to avoid a shared default instance.)
        """
        if not properties:
            return document
        # Bug fix: the original only created ``new_properties_metadata`` in the
        # branch where ``properties_metadata`` was truthy, so the fallback loop
        # below raised NameError whenever the metadata argument was omitted.
        new_properties_metadata = {}
        if properties_metadata:
            for prop, description in properties_metadata.items():
                new_properties_metadata[prop] = {"description": description}
        for prop in properties:
            if prop not in new_properties_metadata:
                new_properties_metadata[prop] = {"description": prop}
        if 'properties' in document:
            document['properties'].update(properties)
            document['properties_metadata'].update(new_properties_metadata)
        else:
            document['properties'] = properties
            document['properties_metadata'] = new_properties_metadata
        return document
    # REQUEST
    @timed_logging
    def _make_request(self, uri):
        """GET *uri* and return its decoded JSON body.

        Returns ``None`` when the request itself fails (the error is logged),
        and raises :class:`GloboMapException` for any HTTP status other than
        200 or 404.
        """
        request_url = '{}'.format(uri)
        try:
            response = self.session.get(
                request_url,
                verify=SSL_VERIFY,
                headers={
                    'User-Agent': 'globomap'
                }
            )
            response.encoding = 'utf-8'
        except Exception as err:
            logger.exception(
                '[SAMPLE][request] get - %s, Error:%s', request_url, err)
        else:
            status = response.status_code
            content = response.json()
            logger.debug(
                '[SAMPLE][response] get - %s %s %s',
                request_url, status, content
            )
            if status not in (200, 404):
                logger.error(
                    '[SAMPLE][error] get - %s %s %s',
                    request_url, status, content
                )
                raise GloboMapException(content)
            return content
class GloboMapException(Exception):
    """Raised when the sample API answers with an unexpected HTTP status."""
| 4,532 | 340 | 46 |
eee72873efed3c5941ca89a2e1b3b2a5800f6a42 | 1,110 | py | Python | xml_py/JTS_boundary.py | mlacayoemery/wps-server-invest | c3d5eca357218dac76225028fab491b357cbc343 | [
"MIT"
] | null | null | null | xml_py/JTS_boundary.py | mlacayoemery/wps-server-invest | c3d5eca357218dac76225028fab491b357cbc343 | [
"MIT"
] | null | null | null | xml_py/JTS_boundary.py | mlacayoemery/wps-server-invest | c3d5eca357218dac76225028fab491b357cbc343 | [
"MIT"
] | null | null | null | import pywps
| 48.26087 | 318 | 0.657658 | import pywps
class invest(pywps.Process):
    """WPS process definition wrapping the JTS ``boundary`` geometry operation."""
    def __init__(self):
        """Declare the process signature: one geometry input, one string output."""
        # Accepted geometry encodings: GML 2/3 (as text/xml or application/*),
        # WKT and JSON.
        inputs = [pywps.ComplexInput('geom', 'Input geometry', supported_formats=[pywps.Format('text/xml; subtype=gml/3.1.1'), pywps.Format('text/xml; subtype=gml/2.1.2'), pywps.Format('application/wkt'), pywps.Format('application/json'), pywps.Format('application/gml-3.1.1'), pywps.Format('application/gml-2.1.2')])]
        outputs = [pywps.LiteralOutput('result', 'None', data_type='string')]
        super(invest, self).__init__(
            self._handler,
            identifier='JTS:boundary',
            title='Boundary',
            abstract='Returns a geometry boundary. For polygons, returns a linear ring or multi-linestring equal to the boundary of the polygon(s). For linestrings, returns a multipoint equal to the endpoints of the linestring. For points, returns an empty geometry collection.',
            version='None',
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True
        )
    def _handler(self, request, response):
        # NOTE(review): stub handler — it echoes the response without computing
        # the boundary; confirm whether the actual computation happens elsewhere.
        return response
| 1,014 | 7 | 76 |
8bdac6f8e203e3cd81868d56a700bda5289d768f | 497 | py | Python | ModelTracker/models.py | boladjivinny/ModelTracker | 05693f09bf002cc934e9d682f2598c05d18c3089 | [
"MIT"
] | 8 | 2019-02-02T15:33:30.000Z | 2022-02-28T14:17:50.000Z | ModelTracker/models.py | boladjivinny/ModelTracker | 05693f09bf002cc934e9d682f2598c05d18c3089 | [
"MIT"
] | null | null | null | ModelTracker/models.py | boladjivinny/ModelTracker | 05693f09bf002cc934e9d682f2598c05d18c3089 | [
"MIT"
] | 5 | 2016-09-07T04:26:01.000Z | 2021-11-07T10:16:31.000Z | from django.db import models
from jsonfield import JSONField
| 33.133333 | 52 | 0.754527 | from django.db import models
from jsonfield import JSONField
class History(models.Model):
    """Audit-log entry recording one change made to a row of a tracked table."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255, default="")
    table = models.CharField(max_length=255)        # name of the tracked table
    primary_key = models.CharField(max_length=255)  # PK of the tracked row
    # Bug fix: use the `dict` callable instead of a literal `{}` so each row
    # gets its own empty default rather than one shared mutable instance.
    old_state = JSONField(default=dict)
    new_state = JSONField(default=dict)
    done_by = models.CharField(max_length=255)
    done_on = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Text representation of the entry; `self.id` is an int, so coerce it.
        return str(self.id)
    def __unicode__(self):
        # Bug fix: __unicode__ must return text, not the raw integer PK.
        return str(self.id)
| 24 | 389 | 22 |
a1ccc40256309bd83c5b22af9ac1a4bd1ac3d532 | 10,936 | py | Python | lib/data/datasets.py | Nirvana-fsociety/skip-ganomaly | 0d88b3904c6acec6cd9cb77222bb5467010955b6 | [
"MIT"
] | 158 | 2019-08-14T14:16:16.000Z | 2022-03-26T04:40:02.000Z | lib/data/datasets.py | Nirvana-fsociety/skip-ganomaly | 0d88b3904c6acec6cd9cb77222bb5467010955b6 | [
"MIT"
] | 24 | 2019-08-25T08:10:22.000Z | 2021-11-15T16:32:49.000Z | lib/data/datasets.py | Nirvana-fsociety/skip-ganomaly | 0d88b3904c6acec6cd9cb77222bb5467010955b6 | [
"MIT"
] | 68 | 2019-08-21T08:51:31.000Z | 2022-03-05T07:17:10.000Z | """
CREATE DATASETS
"""
# pylint: disable=C0301,E1101,W0622,C0103,R0902,R0915
import torch.utils.data as data
import torch
from random import shuffle
from torchvision.datasets import DatasetFolder
from pathlib import Path
from PIL import Image
import numpy as np
import os
import os.path
import random
import imageio
import numpy as np
# pylint: disable=E1101
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF'
]
class ImageFolder(data.Dataset):
    """A generic data loader for images arranged in class subfolders: ::
        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/xxz.png
        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/asd932_.png
    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
    Attributes:
        classes (list): List of the class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """
    def __getitem__(self, index):
        """Return sample *index* as a dict: the (optionally transformed) image,
        its fixed latent noise vector, the sample index and the class index.

        Args:
            index (int): Index
        """
        # self.imgs / self.loader / self.transform / self.noise are assigned
        # by the dataset's initializer.
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        latentz = self.noise[index]
        # TODO: Return these variables in a dict.
        # return img, latentz, index, target
        return {'image': img, 'latentz': latentz, 'index': index, 'frame_gt': target}
# TODO: refactor cifar-mnist anomaly dataset functions into one generic function.
##
def get_cifar_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0):
    """Re-split CIFAR10 train/valid sets into a one-class anomaly benchmark.

    Samples of class ``abn_cls_idx`` become the anomalies (label 1); every
    other class is normal (label 0).  Training keeps only normal training
    images; the validation set gets the normal test images followed by all
    anomalous images from both original splits.

    Arguments:
        train_ds {Dataset - CIFAR10} -- Training dataset
        valid_ds {Dataset - CIFAR10} -- Validation dataset.

    Keyword Arguments:
        abn_cls_idx {int} -- Anomalous class index (default: {0})

    Returns:
        The two datasets, modified in place.
    """
    trn_img = train_ds.data
    trn_lbl = np.array(train_ds.targets)
    tst_img = valid_ds.data
    tst_lbl = np.array(valid_ds.targets)
    # Boolean masks separating normal from anomalous samples.
    trn_nrm = trn_lbl != abn_cls_idx
    trn_abn = trn_lbl == abn_cls_idx
    tst_nrm = tst_lbl != abn_cls_idx
    tst_abn = tst_lbl == abn_cls_idx
    # Train split: normal training samples only, all relabelled 0.
    train_ds.data = trn_img[trn_nrm].copy()
    train_ds.targets = np.zeros_like(trn_lbl[trn_nrm])
    # Validation split: normal test samples (0) then every anomaly (1).
    valid_ds.data = np.concatenate(
        (tst_img[tst_nrm], trn_img[trn_abn], tst_img[tst_abn]), axis=0)
    valid_ds.targets = np.concatenate(
        (np.zeros_like(tst_lbl[tst_nrm]),
         np.ones_like(trn_lbl[trn_abn]),
         np.ones_like(tst_lbl[tst_abn])), axis=0)
    return train_ds, valid_ds
##
def get_mnist_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0):
    """Re-split MNIST train/valid sets into a one-class anomaly benchmark.

    Samples of class ``abn_cls_idx`` become the anomalies (label 1); every
    other class is normal (label 0).  Training keeps only normal samples; the
    validation set receives the normal test images followed by all anomalies.

    Arguments:
        train_ds {Dataset - MNIST} -- Training dataset
        valid_ds {Dataset - MNIST} -- Validation dataset.

    Keyword Arguments:
        abn_cls_idx {int} -- Anomalous class index (default: {0})

    Returns:
        The two datasets, modified in place.
    """
    trn_img, trn_lbl = train_ds.data, train_ds.targets
    tst_img, tst_lbl = valid_ds.data, valid_ds.targets
    # Boolean masks separating normal from anomalous samples.
    trn_nrm = trn_lbl != abn_cls_idx
    trn_abn = trn_lbl == abn_cls_idx
    tst_nrm = tst_lbl != abn_cls_idx
    tst_abn = tst_lbl == abn_cls_idx
    # Train split: normal training samples only, all relabelled 0.
    train_ds.data = trn_img[trn_nrm].clone()
    train_ds.targets = torch.zeros_like(trn_lbl[trn_nrm])
    # Validation split: normal test samples (0) then every anomaly (1).
    valid_ds.data = torch.cat((tst_img[tst_nrm], trn_img[trn_abn], tst_img[tst_abn]), dim=0)
    valid_ds.targets = torch.cat(
        (torch.zeros_like(tst_lbl[tst_nrm]),
         torch.ones_like(trn_lbl[trn_abn]),
         torch.ones_like(tst_lbl[tst_abn])), dim=0)
    return train_ds, valid_ds
##
def make_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0):
    """Re-partition *train_ds*/*valid_ds* into a one-class anomaly benchmark.

    Class ``abn_cls_idx`` is treated as anomalous (label 1) and every other
    class as normal (label 0).  Training keeps only normal samples; the
    validation set receives all remaining data.  numpy-backed datasets are
    converted to torch tensors first (targets become float tensors).

    Arguments:
        train_ds {Dataset} -- Training dataset (``.data`` / ``.targets``)
        valid_ds {Dataset} -- Validation dataset (``.data`` / ``.targets``)

    Keyword Arguments:
        abn_cls_idx {int} -- Anomalous class index (default: {0})

    Returns:
        The two datasets, modified in place.
    """
    # Normalize numpy-backed datasets to torch tensors.
    if isinstance(train_ds.data, np.ndarray):
        train_ds.data = torch.from_numpy(train_ds.data)
        valid_ds.data = torch.from_numpy(valid_ds.data)
        train_ds.targets = torch.Tensor(train_ds.targets)
        valid_ds.targets = torch.Tensor(valid_ds.targets)
    trn_img, trn_lbl = train_ds.data, train_ds.targets
    tst_img, tst_lbl = valid_ds.data, valid_ds.targets
    # Boolean masks separating normal from anomalous samples.
    trn_nrm = trn_lbl != abn_cls_idx
    trn_abn = trn_lbl == abn_cls_idx
    tst_nrm = tst_lbl != abn_cls_idx
    tst_abn = tst_lbl == abn_cls_idx
    # Train split: normal training samples only, all relabelled 0.
    train_ds.data = trn_img[trn_nrm].clone()
    train_ds.targets = torch.zeros_like(trn_lbl[trn_nrm])
    # Validation split: normal test samples (0) then every anomaly (1).
    valid_ds.data = torch.cat((tst_img[tst_nrm], trn_img[trn_abn], tst_img[tst_abn]), dim=0)
    valid_ds.targets = torch.cat(
        (torch.zeros_like(tst_lbl[tst_nrm]),
         torch.ones_like(trn_lbl[trn_abn]),
         torch.ones_like(tst_lbl[tst_abn])), dim=0)
    return train_ds, valid_ds
| 34.939297 | 101 | 0.655541 | """
CREATE DATASETS
"""
# pylint: disable=C0301,E1101,W0622,C0103,R0902,R0915
import torch.utils.data as data
import torch
from random import shuffle
from torchvision.datasets import DatasetFolder
from pathlib import Path
from PIL import Image
import numpy as np
import os
import os.path
import random
import imageio
import numpy as np
# pylint: disable=E1101
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF'
]
def is_image_file(filename):
    """Return True when *filename* ends with one of the known image extensions."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def find_classes(dir):
    """Return the sorted class subdirectory names of *dir* plus a name->index map."""
    classes = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry))
    )
    class_to_idx = {name: index for index, name in enumerate(classes)}
    return classes, class_to_idx
def make_dataset(dir, class_to_idx):
    """Collect ``(image_path, class_index)`` pairs from every class subfolder of *dir*.

    Walks each class directory recursively (in sorted order for determinism)
    and keeps only files whose extension marks them as images.
    """
    images = []
    dir = os.path.expanduser(dir)
    for target in sorted(os.listdir(dir)):
        d = os.path.join(dir, target)
        # Plain files at the top level are not classes; skip them.
        if not os.path.isdir(d):
            continue
        for root, _, fnames in sorted(os.walk(d)):
            for fname in sorted(fnames):
                if is_image_file(fname):
                    path = os.path.join(root, fname)
                    item = (path, class_to_idx[target])
                    images.append(item)
    return images
def pil_loader(path):
    """Load the image at *path* with PIL and convert it to RGB."""
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGB')
def accimage_loader(path):
    """Load the image at *path* via accimage, falling back to PIL on decode errors."""
    # Imported lazily so the module works when accimage is not installed.
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)
def default_loader(path):
    """Dispatch to the image loader matching torchvision's configured backend."""
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    else:
        return pil_loader(path)
class ImageFolder(data.Dataset):
    """A generic data loader where the images are arranged in this way: ::
        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/xxz.png
        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/asd932_.png
    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
    Attributes:
        classes (list): List of the class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """
    def __init__(self, root, nz=100, transform=None, target_transform=None,
                 loader=default_loader):
        """Index the image folder and pre-sample one latent noise vector per image."""
        classes, class_to_idx = find_classes(root)
        imgs = make_dataset(root, class_to_idx)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                               "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
        self.root = root
        self.imgs = imgs
        # One fixed latent vector z ~ N(0, 1) of size `nz` per image.
        self.noise = torch.FloatTensor(len(self.imgs), nz, 1, 1).normal_(0, 1)
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
    def __getitem__(self, index):
        """Return sample *index* as a dict (image, latent vector, index, class).

        Args:
            index (int): Index
        """
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        latentz = self.noise[index]
        # TODO: Return these variables in a dict.
        # return img, latentz, index, target
        return {'image': img, 'latentz': latentz, 'index': index, 'frame_gt': target}
    def __setitem__(self, index, value):
        # Allow callers to overwrite the latent vector assigned to an image.
        self.noise[index] = value
    def __len__(self):
        # Dataset size == number of indexed images.
        return len(self.imgs)
# TODO: refactor cifar-mnist anomaly dataset functions into one generic function.
##
def get_cifar_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0):
    """Re-split CIFAR10 train/valid sets into a one-class anomaly benchmark.

    Samples of class ``abn_cls_idx`` become the anomalies (label 1); every
    other class is normal (label 0).  Training keeps only normal training
    images; validation gets normal test images plus all anomalies.

    Arguments:
        train_ds {Dataset - CIFAR10} -- Training dataset
        valid_ds {Dataset - CIFAR10} -- Validation dataset.
    Keyword Arguments:
        abn_cls_idx {int} -- Anomalous class index (default: {0})
    Returns:
        [np.array] -- New training-test images and labels.
    """
    # Get images and labels.
    trn_img, trn_lbl = train_ds.data, np.array(train_ds.targets)
    tst_img, tst_lbl = valid_ds.data, np.array(valid_ds.targets)
    # --
    # Find idx, img, lbl for abnormal and normal on org dataset.
    nrm_trn_idx = np.where(trn_lbl != abn_cls_idx)[0]
    abn_trn_idx = np.where(trn_lbl == abn_cls_idx)[0]
    nrm_trn_img = trn_img[nrm_trn_idx]  # Normal training images
    abn_trn_img = trn_img[abn_trn_idx]  # Abnormal training images
    nrm_trn_lbl = trn_lbl[nrm_trn_idx]  # Normal training labels
    abn_trn_lbl = trn_lbl[abn_trn_idx]  # Abnormal training labels.
    nrm_tst_idx = np.where(tst_lbl != abn_cls_idx)[0]
    abn_tst_idx = np.where(tst_lbl == abn_cls_idx)[0]
    nrm_tst_img = tst_img[nrm_tst_idx]  # Normal test images
    abn_tst_img = tst_img[abn_tst_idx]  # Abnormal test images.
    nrm_tst_lbl = tst_lbl[nrm_tst_idx]  # Normal test labels
    abn_tst_lbl = tst_lbl[abn_tst_idx]  # Abnormal test labels.
    # --
    # Assign labels to normal (0) and abnormals (1)
    nrm_trn_lbl[:] = 0
    nrm_tst_lbl[:] = 0
    abn_trn_lbl[:] = 1
    abn_tst_lbl[:] = 1
    # Create new anomaly dataset based on the following data structure:
    # - anomaly dataset
    #     - train: normal only
    #     - test: normal + abnormal
    train_ds.data = np.copy(nrm_trn_img)
    valid_ds.data = np.concatenate((nrm_tst_img, abn_trn_img, abn_tst_img), axis=0)
    train_ds.targets = np.copy(nrm_trn_lbl)
    valid_ds.targets = np.concatenate((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), axis=0)
    return train_ds, valid_ds
##
def get_mnist_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0):
    """Re-split MNIST train/valid sets into a one-class anomaly benchmark.

    Samples of class ``abn_cls_idx`` become the anomalies (label 1); every
    other class is normal (label 0).  Training keeps only normal samples;
    validation gets normal test images plus all anomalies.

    Arguments:
        train_ds {Dataset - MNIST} -- Training dataset
        valid_ds {Dataset - MNIST} -- Validation dataset.
    Keyword Arguments:
        abn_cls_idx {int} -- Anomalous class index (default: {0})
    Returns:
        [np.array] -- New training-test images and labels.
    """
    # Get images and labels.
    trn_img, trn_lbl = train_ds.data, train_ds.targets
    tst_img, tst_lbl = valid_ds.data, valid_ds.targets
    # --
    # Find normal abnormal indexes.
    # TODO: PyTorch v0.4 has torch.where function
    nrm_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() != abn_cls_idx)[0])
    abn_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() == abn_cls_idx)[0])
    nrm_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() != abn_cls_idx)[0])
    abn_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() == abn_cls_idx)[0])
    # --
    # Find normal and abnormal images
    nrm_trn_img = trn_img[nrm_trn_idx]  # Normal training images
    abn_trn_img = trn_img[abn_trn_idx]  # Abnormal training images.
    nrm_tst_img = tst_img[nrm_tst_idx]  # Normal test images
    abn_tst_img = tst_img[abn_tst_idx]  # Abnormal test images.
    # --
    # Find normal and abnormal labels.
    nrm_trn_lbl = trn_lbl[nrm_trn_idx]  # Normal training labels
    abn_trn_lbl = trn_lbl[abn_trn_idx]  # Abnormal training labels.
    nrm_tst_lbl = tst_lbl[nrm_tst_idx]  # Normal test labels
    abn_tst_lbl = tst_lbl[abn_tst_idx]  # Abnormal test labels.
    # --
    # Assign labels to normal (0) and abnormals (1)
    nrm_trn_lbl[:] = 0
    nrm_tst_lbl[:] = 0
    abn_trn_lbl[:] = 1
    abn_tst_lbl[:] = 1
    # Create new anomaly dataset based on the following data structure:
    # train = normal only; test = normal test + all abnormal samples.
    train_ds.data = nrm_trn_img.clone()
    valid_ds.data = torch.cat((nrm_tst_img, abn_trn_img, abn_tst_img), dim=0)
    train_ds.targets = nrm_trn_lbl.clone()
    valid_ds.targets = torch.cat((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), dim=0)
    return train_ds, valid_ds
##
def make_anomaly_dataset(train_ds, valid_ds, abn_cls_idx=0):
"""[summary]
Arguments:
train_ds {Dataset - MNIST} -- Training dataset
valid_ds {Dataset - MNIST} -- Validation dataset.
Keyword Arguments:
abn_cls_idx {int} -- Anomalous class index (default: {0})
Returns:
[np.array] -- New training-test images and labels.
"""
# Check the input type.
if isinstance(train_ds.data, np.ndarray):
train_ds.data = torch.from_numpy(train_ds.data)
valid_ds.data = torch.from_numpy(valid_ds.data)
train_ds.targets = torch.Tensor(train_ds.targets)
valid_ds.targets = torch.Tensor(valid_ds.targets)
# Get images and labels.
trn_img, trn_lbl = train_ds.data, train_ds.targets
tst_img, tst_lbl = valid_ds.data, valid_ds.targets
# --
# Find normal abnormal indexes.
# TODO: PyTorch v0.4 has torch.where function
nrm_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() != abn_cls_idx)[0])
abn_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() == abn_cls_idx)[0])
nrm_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() != abn_cls_idx)[0])
abn_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() == abn_cls_idx)[0])
# --
# Find normal and abnormal images
nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images
abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images.
nrm_tst_img = tst_img[nrm_tst_idx] # Normal training images
abn_tst_img = tst_img[abn_tst_idx] # Abnormal training images.
# --
# Find normal and abnormal labels.
nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels
abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels.
nrm_tst_lbl = tst_lbl[nrm_tst_idx] # Normal training labels
abn_tst_lbl = tst_lbl[abn_tst_idx] # Abnormal training labels.
# --
# Assign labels to normal (0) and abnormals (1)
nrm_trn_lbl[:] = 0
nrm_tst_lbl[:] = 0
abn_trn_lbl[:] = 1
abn_tst_lbl[:] = 1
# Create new anomaly dataset based on the following data structure:
train_ds.data = nrm_trn_img.clone()
valid_ds.data = torch.cat((nrm_tst_img, abn_trn_img, abn_tst_img), dim=0)
train_ds.targets = nrm_trn_lbl.clone()
valid_ds.targets = torch.cat((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), dim=0)
return train_ds, valid_ds
| 2,134 | 0 | 219 |
867e6dce8c62af6dcb2d30f65bd1ed5909e8b4db | 4,472 | py | Python | code/train.py | EricZhangSCUT/DeepPSC | 19a2b94cfacd6591e8e8c66cc69baf0422b4e9d0 | [
"MIT"
] | null | null | null | code/train.py | EricZhangSCUT/DeepPSC | 19a2b94cfacd6591e8e8c66cc69baf0422b4e9d0 | [
"MIT"
] | null | null | null | code/train.py | EricZhangSCUT/DeepPSC | 19a2b94cfacd6591e8e8c66cc69baf0422b4e9d0 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from model import DeepPSC
import dataset
from torch.utils.data import DataLoader
import pathlib
import os
import time
import logging
import argparse
from test import test
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--loc_dim', type=int, default=512)
parser.add_argument('--glo_dim', type=int, default=1024)
parser.add_argument('--tgt_dim', type=int, default=4)
parser.add_argument('--set', type=str, default=None)
parser.add_argument('--note', type=str, default=None)
parser.add_argument('--epoch', type=int, default=30)
args = parser.parse_args()
train_name = '-'.join([arg for arg in [args.set, args.note] if arg])
save_dir = '../output/%s' % train_name
model_dir = '../output/%s/model' % train_name
dirs = [save_dir, model_dir]
for folder in dirs:
pathlib.Path(folder).mkdir(parents=True, exist_ok=True)
logging.basicConfig(level=logging.INFO, filename='../logs/log_%s.txt' %
train_name, filemode='w', format='%(message)s')
logger = logging.getLogger(__name__)
dataset = dataset.Image2Tor
inp_dim = 5
train_dataset = dataset(
dataset='nr40', file_list='train_%s' % args.set if args.set else 'train')
val_dataset = dataset(
dataset='nr40', file_list='val_%s' % args.set if args.set else 'val')
test_dataset = dataset('test', with_target=False)
train_loader = DataLoader(dataset=train_dataset, shuffle=True,
num_workers=32, pin_memory=True)
val_loader = DataLoader(dataset=val_dataset, num_workers=32, pin_memory=True)
test_loader = DataLoader(dataset=test_dataset, num_workers=32, pin_memory=True)
model = DeepPSC(dims=[inp_dim, args.loc_dim, args.glo_dim, args.tgt_dim])
logger.info('-----Model-----')
logger.info(model)
logger.info('-----Model-----\n\n')
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), 3e-4, weight_decay=10e-6)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=len(train_dataset))
warmup_epochs = 3
if __name__ == '__main__':
total_iters = 0
for epoch in range(args.epoch):
epoch_start_time = time.time()
epoch_iter = 0
losses = 0
logger.info('epoch %s training' % epoch)
for inp, tgt, filename, lengths in train_loader:
optimizer.zero_grad()
inp = inp[0].cuda(non_blocking=True)
tgt = tgt[0].cuda(non_blocking=True)
out = model(inp, lengths).squeeze(1).transpose(0, 1)
loss = loss_function(out, tgt)
losses += loss.item()
total_iters += 1
epoch_iter += 1
loss.backward()
optimizer.step()
if total_iters % 20 == 0:
logger.info('iters %s train_loss=%s' %
(total_iters, losses / 20))
losses = 0
if total_iters % 1000 == 0:
model.save_model(os.path.join(model_dir, 'last_model'))
if epoch >= warmup_epochs:
scheduler.step()
logger.info('epoch %s validating...' % epoch)
model.save_model(os.path.join(model_dir, '%s_model' % epoch))
model.eval()
with torch.no_grad():
losses = 0
for inp, tgt, filename, lengths in val_loader:
inp = inp[0].cuda(non_blocking=True)
tgt = tgt[0].cuda(non_blocking=True)
out = model(inp, lengths).squeeze(1).transpose(0, 1)
loss = loss_function(out, tgt)
losses += loss.item()
logger.info('epoch %d, mean_val_loss= %f' %
(epoch, losses / len(val_dataset)))
model.train()
logger.info('End of epoch %d \t Time Taken: %d sec' %
(epoch, time.time() - epoch_start_time))
tester = test(model, train_name=train_name, test_loader=test_loader)
tester.test_model('29')
rmsd = np.round(np.mean(np.array(tester.rmsds), axis=0), 3)
gdt = np.round(
np.mean(np.mean(np.array(tester.gdts), axis=-1), axis=0)*100, 3)
rama = np.round(np.mean(np.array(tester.ramas), axis=0)*100, 3)
logger.info('\n\n--Results--')
logger.info('-RMSD-')
logger.info(rmsd)
logger.info('-GDT-')
logger.info(gdt)
logger.info('-RAMA-')
logger.info(rama)
| 32.405797 | 80 | 0.605769 | import torch
import torch.nn as nn
from model import DeepPSC
import dataset
from torch.utils.data import DataLoader
import pathlib
import os
import time
import logging
import argparse
from test import test
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--loc_dim', type=int, default=512)
parser.add_argument('--glo_dim', type=int, default=1024)
parser.add_argument('--tgt_dim', type=int, default=4)
parser.add_argument('--set', type=str, default=None)
parser.add_argument('--note', type=str, default=None)
parser.add_argument('--epoch', type=int, default=30)
args = parser.parse_args()
train_name = '-'.join([arg for arg in [args.set, args.note] if arg])
save_dir = '../output/%s' % train_name
model_dir = '../output/%s/model' % train_name
dirs = [save_dir, model_dir]
for folder in dirs:
pathlib.Path(folder).mkdir(parents=True, exist_ok=True)
logging.basicConfig(level=logging.INFO, filename='../logs/log_%s.txt' %
train_name, filemode='w', format='%(message)s')
logger = logging.getLogger(__name__)
dataset = dataset.Image2Tor
inp_dim = 5
train_dataset = dataset(
dataset='nr40', file_list='train_%s' % args.set if args.set else 'train')
val_dataset = dataset(
dataset='nr40', file_list='val_%s' % args.set if args.set else 'val')
test_dataset = dataset('test', with_target=False)
train_loader = DataLoader(dataset=train_dataset, shuffle=True,
num_workers=32, pin_memory=True)
val_loader = DataLoader(dataset=val_dataset, num_workers=32, pin_memory=True)
test_loader = DataLoader(dataset=test_dataset, num_workers=32, pin_memory=True)
model = DeepPSC(dims=[inp_dim, args.loc_dim, args.glo_dim, args.tgt_dim])
logger.info('-----Model-----')
logger.info(model)
logger.info('-----Model-----\n\n')
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), 3e-4, weight_decay=10e-6)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=len(train_dataset))
warmup_epochs = 3
if __name__ == '__main__':
total_iters = 0
for epoch in range(args.epoch):
epoch_start_time = time.time()
epoch_iter = 0
losses = 0
logger.info('epoch %s training' % epoch)
for inp, tgt, filename, lengths in train_loader:
optimizer.zero_grad()
inp = inp[0].cuda(non_blocking=True)
tgt = tgt[0].cuda(non_blocking=True)
out = model(inp, lengths).squeeze(1).transpose(0, 1)
loss = loss_function(out, tgt)
losses += loss.item()
total_iters += 1
epoch_iter += 1
loss.backward()
optimizer.step()
if total_iters % 20 == 0:
logger.info('iters %s train_loss=%s' %
(total_iters, losses / 20))
losses = 0
if total_iters % 1000 == 0:
model.save_model(os.path.join(model_dir, 'last_model'))
if epoch >= warmup_epochs:
scheduler.step()
logger.info('epoch %s validating...' % epoch)
model.save_model(os.path.join(model_dir, '%s_model' % epoch))
model.eval()
with torch.no_grad():
losses = 0
for inp, tgt, filename, lengths in val_loader:
inp = inp[0].cuda(non_blocking=True)
tgt = tgt[0].cuda(non_blocking=True)
out = model(inp, lengths).squeeze(1).transpose(0, 1)
loss = loss_function(out, tgt)
losses += loss.item()
logger.info('epoch %d, mean_val_loss= %f' %
(epoch, losses / len(val_dataset)))
model.train()
logger.info('End of epoch %d \t Time Taken: %d sec' %
(epoch, time.time() - epoch_start_time))
tester = test(model, train_name=train_name, test_loader=test_loader)
tester.test_model('29')
rmsd = np.round(np.mean(np.array(tester.rmsds), axis=0), 3)
gdt = np.round(
np.mean(np.mean(np.array(tester.gdts), axis=-1), axis=0)*100, 3)
rama = np.round(np.mean(np.array(tester.ramas), axis=0)*100, 3)
logger.info('\n\n--Results--')
logger.info('-RMSD-')
logger.info(rmsd)
logger.info('-GDT-')
logger.info(gdt)
logger.info('-RAMA-')
logger.info(rama)
| 0 | 0 | 0 |
16583d58cb50dc29fe87a218adc0eb4eca4728bf | 3,273 | py | Python | src/vmware/azext_vmware/tests/latest/test_datastores_scenario.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-02-18T00:16:47.000Z | 2022-02-18T00:16:47.000Z | src/vmware/azext_vmware/tests/latest/test_datastores_scenario.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 9 | 2022-03-25T19:35:49.000Z | 2022-03-31T06:09:47.000Z | src/vmware/azext_vmware/tests/latest/test_datastores_scenario.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-03-10T22:13:02.000Z | 2022-03-10T22:13:02.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
from msrestazure.azure_exceptions import CloudError
| 60.611111 | 221 | 0.659028 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
from msrestazure.azure_exceptions import CloudError
class VmwareDatastoresScenarioTest(ScenarioTest):
def setUp(self):
# https://vcrpy.readthedocs.io/en/latest/configuration.html#request-matching
self.vcr.match_on = ['scheme', 'method', 'path', 'query'] # not 'host', 'port'
super(VmwareDatastoresScenarioTest, self).setUp()
@ResourceGroupPreparer(name_prefix='cli_test_vmware_datastores')
def test_vmware_datastores(self):
self.kwargs.update({
# 'loc': 'centralus',
# 'privatecloud': 'cloud1',
# 'cluster': 'pycluster1',
'rg': 'rasivagu-sddc-rg',
'privatecloud': 'rasivagu-mock-sddc',
'cluster': 'Cluster-1',
'volume_id': '/subscriptions/11111111-1111-1111-1111-111111111111/resourceGroups/ResourceGroup1/providers/Microsoft.NetApp/netAppAccounts/NetAppAccount1/capacityPools/CapacityPool1/volumes/NFSVol1',
'target_id': '/subscriptions/ba75e79b-dd95-4025-9dbf-3a7ae8dff2b5/resourceGroups/rasivagu-df-rg/providers/Microsoft.StoragePool/diskPools/rasivagu-df-diskpool/iscsiTargets/rasivagu-df-target'
})
# Create a new iSCSI based datastore
self.cmd('az vmware datastore disk-pool-volume create --name iSCSIDatastore1 --resource-group {rg} --private-cloud {privatecloud} --cluster {cluster} --target-id {target_id} --lun-name lun0')
# "The subscription '11111111-1111-1111-1111-111111111111' could not be found.
# Get a iSCSI datastore
self.cmd('az vmware datastore show --name iSCSIDatastore1 --resource-group {rg} --private-cloud {privatecloud} --cluster {cluster}')
# # List all existing datastores
self.cmd('az vmware datastore list --resource-group {rg} --private-cloud {privatecloud} --cluster {cluster}')
# Create a new ANF based datastore
# self.cmd('az vmware datastore netapp-volume create --name ANFDatastore1 --resource-group {rg} --private-cloud {privatecloud} --cluster {cluster} --volume-id {volume_id}')
# "ANF datastore is not enabled for the cloud SAN version 'v1'
# Get the newly created ANF based datastore
# self.cmd('az vmware datastore show --name ANFDatastore1 --resource-group {rg} --private-cloud {privatecloud} --cluster {cluster}')
# Delete the newly created ANF based datastore
self.cmd('az vmware datastore delete --name iSCSIDatastore1 --resource-group {rg} --private-cloud {privatecloud} --cluster {cluster} --yes')
# Create a new iSCSI based datastore with --mount-option as ATTACH
self.cmd('az vmware datastore disk-pool-volume create --name iSCSIDatastore1 --resource-group {rg} --private-cloud {privatecloud} --cluster {cluster} --target-id {target_id} --lun-name lun0 --mount-option ATTACH')
| 2,598 | 150 | 23 |
3f15425227aaae16ccd6a42bd73b24824aed6eb8 | 1,605 | py | Python | nicos_mlz/poli/devices/lubrication.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/poli/devices/lubrication.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z | nicos_mlz/poli/devices/lubrication.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <georg.brandl@frm2.tum.de>
#
# *****************************************************************************
"""Lubrication device for lifting counter."""
from nicos.core import status
from nicos.devices.tango import DigitalOutput
class LubeSwitch(DigitalOutput):
"""Special SPS digital output whose readback is a status value."""
| 38.214286 | 79 | 0.650467 | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <georg.brandl@frm2.tum.de>
#
# *****************************************************************************
"""Lubrication device for lifting counter."""
from nicos.core import status
from nicos.devices.tango import DigitalOutput
class LubeSwitch(DigitalOutput):
"""Special SPS digital output whose readback is a status value."""
def doRead(self, maxage=0):
return self.target # no readback possible
def doStatus(self, maxage=0):
statusval = self._dev.value # readout is status
if statusval != 0:
return status.ERROR, 'error status: %d' % statusval
return status.OK, ''
| 242 | 0 | 54 |
c3f4e33f018b1540699f3d68b9901cb4709286ca | 2,090 | py | Python | src/neochi/brain/fits.py | neochi/brain | 79afa004ac3129645d9d751b8df5a6a9f934b754 | [
"MIT"
] | null | null | null | src/neochi/brain/fits.py | neochi/brain | 79afa004ac3129645d9d751b8df5a6a9f934b754 | [
"MIT"
] | 17 | 2020-01-28T22:51:19.000Z | 2022-03-11T23:51:15.000Z | src/neochi/brain/fits.py | neochi/brain | 79afa004ac3129645d9d751b8df5a6a9f934b754 | [
"MIT"
] | 1 | 2019-04-29T09:22:59.000Z | 2019-04-29T09:22:59.000Z | # MIT License
#
# Copyright (c) 2019 Morning Project Samurai (MPS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'Junya Kaneko <junya@mpsamurai.org>'
import os
from neochi.core.utils import environ
from . import data_loaders
from . import models
| 43.541667 | 115 | 0.744498 | # MIT License
#
# Copyright (c) 2019 Morning Project Samurai (MPS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'Junya Kaneko <junya@mpsamurai.org>'
import os
from neochi.core.utils import environ
from . import data_loaders
from . import models
def fit_behavior_classifier():
kwargs = environ.get_kwargs('BRAIN_BEHAVIOR_CLASSIFIER')
X, y = data_loaders.load_classification_data(kwargs['data_dir'], kwargs['labels'])
model = models.BehaviorClassifier(kwargs['shape'], kwargs['fps'], kwargs['labels'])
model.fit(X, y, kwargs['optimizer'], kwargs['loss'], kwargs['metrics'], kwargs['train_size'], kwargs['epochs'])
if not os.path.exists(kwargs['save_dir']):
os.mkdir(kwargs['save_dir'])
model.save(kwargs['save_dir'])
def fit_sleep_detector():
kwargs = environ.get_kwargs('BRAIN_SLEEP_DETECTOR')
model = models.SleepDetector(kwargs['time_steps'], kwargs['weights'], kwargs['fps'], kwargs['threshold'])
if not os.path.exists(kwargs['save_dir']):
os.mkdir(kwargs['save_dir'])
model.save(kwargs['save_dir'])
| 769 | 0 | 46 |
0beef295c183b174fa594e2b2f66f198432374c0 | 866 | py | Python | projects/templatetags/include_as_js_str.py | wedk/todomvc-django-ujs | 3909f2448ee32412f0eebe1007d34fb66cbf7185 | [
"MIT"
] | 1 | 2016-01-08T21:58:02.000Z | 2016-01-08T21:58:02.000Z | projects/templatetags/include_as_js_str.py | wedk/todomvc-django-ujs | 3909f2448ee32412f0eebe1007d34fb66cbf7185 | [
"MIT"
] | null | null | null | projects/templatetags/include_as_js_str.py | wedk/todomvc-django-ujs | 3909f2448ee32412f0eebe1007d34fb66cbf7185 | [
"MIT"
] | null | null | null | # templatetags/include_as_js_str.py
from django import template
from django.template.loader_tags import do_include
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
register = template.Library()
@register.tag('include_as_js_str')
| 30.928571 | 102 | 0.683603 | # templatetags/include_as_js_str.py
from django import template
from django.template.loader_tags import do_include
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
register = template.Library()
def _escape_html_for_js(value):
return mark_safe(force_text(value).lstrip()
.replace('\\', '\\\\').replace('</', '<\/').replace('"', '\\"').replace("'", "\\'")
.replace('\r\n', '\\n').replace('\n', '\\n').replace('\r', '\\n'))
class IncludeAsJsStrNode(template.Node):
def __init__(self, include_node):
self.include_node = include_node
def render(self, context):
return _escape_html_for_js(self.include_node.render(context))
@register.tag('include_as_js_str')
def do_include_as_js_str(parser, token):
return IncludeAsJsStrNode(do_include(parser, token))
| 449 | 19 | 122 |
69bdf78566b6cf86b3a31da9e8d15c104b9ca683 | 96,186 | py | Python | lenv/lib/python3.6/site-packages/Crypto/SelfTest/Hash/test_vectors/keccak/ShortMsgKAT_384.txt.py | shrey-c/DataLeakageDjango | a827c5a09e5501921f9fb97b656755671238dd63 | [
"BSD-3-Clause"
] | 6 | 2020-05-03T12:03:21.000Z | 2020-09-07T08:33:58.000Z | lenv/lib/python3.6/site-packages/Crypto/SelfTest/Hash/test_vectors/keccak/ShortMsgKAT_384.txt.py | shrey-c/DataLeakageDjango | a827c5a09e5501921f9fb97b656755671238dd63 | [
"BSD-3-Clause"
] | 3 | 2020-04-17T06:50:44.000Z | 2022-01-13T02:16:48.000Z | lenv/lib/python3.6/site-packages/Crypto/SelfTest/Hash/test_vectors/keccak/ShortMsgKAT_384.txt.py | shrey-c/DataLeakageDjango | a827c5a09e5501921f9fb97b656755671238dd63 | [
"BSD-3-Clause"
] | null | null | null | X XXXX XXXXXXXXX XXXX XXXXXXXXXXXXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXX
XXX X X
XXX X XX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X X
XXX X XX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
| 93.657254 | 516 | 0.973302 | X XXXX XXXXXXXXX XXXX XXXXXXXXXXXXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXX
XXX X X
XXX X XX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X X
XXX X XX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XX
XXX X XXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXX X XXXX
XXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
| 0 | 0 | 0 |
23ad61f795e61dca9e73baf60cd302ddae30a928 | 865 | py | Python | old_run.py | csuastt/liusm | 818588f93bf4edeee2ec8eaee9579132943caf5a | [
"MIT"
] | 1 | 2021-12-03T09:05:30.000Z | 2021-12-03T09:05:30.000Z | old_run.py | csuastt/Stereo-Enhancement-of-Thermal-Imaging | 818588f93bf4edeee2ec8eaee9579132943caf5a | [
"MIT"
] | null | null | null | old_run.py | csuastt/Stereo-Enhancement-of-Thermal-Imaging | 818588f93bf4edeee2ec8eaee9579132943caf5a | [
"MIT"
] | null | null | null | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# Load the left/right example pair as single-channel images
# (flag 0 == grayscale).
imgL = cv.imread('images/example_l.png', 0)
imgR = cv.imread('images/example_r.png', 0)

# wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size
# image (1300px and above); 5 Works nicely
window_size = 3

# Collect the SGBM parameters in one place, then build the matcher.
sgbm_params = dict(
    minDisparity=-1,
    numDisparities=5 * 16,  # max_disp has to be dividable by 16 f. E. HH 192, 256
    blockSize=window_size,
    P1=8 * 3 * window_size,
    P2=32 * 3 * window_size,
    disp12MaxDiff=12,
    uniquenessRatio=10,
    speckleWindowSize=50,
    speckleRange=32,
    preFilterCap=63,
    mode=cv.STEREO_SGBM_MODE_SGBM_3WAY,
)
stereo = cv.StereoSGBM_create(**sgbm_params)

disparity = stereo.compute(imgL, imgR)
plt.imshow(disparity, 'gray')
plt.show()
| 34.6 | 132 | 0.715607 | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# Compute and display a disparity map for a stereo image pair using
# OpenCV's semi-global block matcher (SGBM).
imgL = cv.imread('images/example_l.png', 0)  # left image, grayscale (flag 0)
imgR = cv.imread('images/example_r.png', 0)  # right image, grayscale
window_size = 3  # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
stereo = cv.StereoSGBM_create(
    minDisparity=-1,
    numDisparities=5*16,  # max_disp has to be dividable by 16 f. E. HH 192, 256
    blockSize=window_size,
    # NOTE(review): OpenCV docs suggest P1 = 8*channels*blockSize**2 and
    # P2 = 32*channels*blockSize**2; the un-squared form here may be
    # intentional tuning -- confirm.
    P1=8 * 3 * window_size,
    # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
    P2=32 * 3 * window_size,
    disp12MaxDiff=12,
    uniquenessRatio=10,
    speckleWindowSize=50,
    speckleRange=32,
    preFilterCap=63,
    mode=cv.STEREO_SGBM_MODE_SGBM_3WAY
)
# Raw (fixed-point) disparity; displayed directly without scaling.
disparity = stereo.compute(imgL,imgR)
plt.imshow(disparity,'gray')
plt.show()
| 0 | 0 | 0 |
11384b21e98956d188e35014bcc9f6c4d8e3e3f9 | 2,374 | py | Python | test/test_data_on_home_page.py | SherMary/python_training | 071f9405ffb97ea2243ac2906713c6fa8ac62ba7 | [
"Apache-2.0"
] | null | null | null | test/test_data_on_home_page.py | SherMary/python_training | 071f9405ffb97ea2243ac2906713c6fa8ac62ba7 | [
"Apache-2.0"
] | null | null | null | test/test_data_on_home_page.py | SherMary/python_training | 071f9405ffb97ea2243ac2906713c6fa8ac62ba7 | [
"Apache-2.0"
] | null | null | null | import re
from model.contact import Contact
"""def test_data_on_home_page(app):
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(
contact_from_edit_page)
assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(
contact_from_edit_page)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.address == contact_from_edit_page.address"""
| 51.608696 | 114 | 0.720303 | import re
from model.contact import Contact
def test_data_on_home_page_compared_with_bd(app, db):
    """The home page must list exactly the contacts stored in the database,
    field by field (names, address, merged phones and e-mails)."""
    db_contacts = sorted(db.get_contact_list(), key=Contact.id_or_max)
    ui_contacts = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
    assert len(ui_contacts) == len(db_contacts)
    assert ui_contacts == db_contacts
    for ui_contact, db_contact in zip(ui_contacts, db_contacts):
        assert ui_contact.lastname == db_contact.lastname
        assert ui_contact.firstname == db_contact.firstname
        assert ui_contact.address == db_contact.address
        assert ui_contact.all_emails_from_home_page == merge_emails_like_on_home_page(db_contact)
        assert ui_contact.all_phones_from_home_page == merge_phones_like_on_home_page(db_contact)
"""def test_data_on_home_page(app):
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(
contact_from_edit_page)
assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(
contact_from_edit_page)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.address == contact_from_edit_page.address"""
def clear(s):
    """Strip the formatting characters (parentheses, spaces, dashes) that
    the home page removes from phone numbers."""
    return re.sub("[() -]", "", s)


def merge_phones_like_on_home_page(contact):
    """Join the contact's non-empty, cleaned phone fields with newlines,
    in the same order the home page shows them."""
    candidates = [contact.telhome, contact.telmobile, contact.telwork, contact.home2]
    cleaned = [clear(value) for value in candidates if value is not None]
    return "\n".join(value for value in cleaned if value != "")


def merge_emails_like_on_home_page(contact):
    """Join the contact's non-empty, cleaned e-mail fields with newlines,
    in the same order the home page shows them."""
    candidates = [contact.email, contact.email2, contact.email3]
    cleaned = [clear(value) for value in candidates if value is not None]
    return "\n".join(value for value in cleaned if value != "")
383d3c089a3bb44aac080ea0cddbd7b8e38faac8 | 1,345 | py | Python | bindings/python/tests/__main__.py | finnhaedicke/metaSMT | 949245da0bf0f3c042cb589aaea5d015e2ed9e9a | [
"MIT"
] | 33 | 2015-04-09T14:14:25.000Z | 2022-03-27T08:55:58.000Z | bindings/python/tests/__main__.py | finnhaedicke/metaSMT | 949245da0bf0f3c042cb589aaea5d015e2ed9e9a | [
"MIT"
] | 28 | 2015-03-13T14:21:33.000Z | 2019-04-02T07:59:34.000Z | bindings/python/tests/__main__.py | finnhaedicke/metaSMT | 949245da0bf0f3c042cb589aaea5d015e2ed9e9a | [
"MIT"
] | 9 | 2015-04-22T18:10:51.000Z | 2021-08-06T12:44:12.000Z | import os
import re
import sys
import random
import unittest
test_str_re = re.compile(r'([^ ]+) \((.*)\)')
if __name__ == "__main__":
    # Discover every test module next to this file.
    tests = unittest.defaultTestLoader.discover(os.path.dirname(__file__), pattern='*.py')
    if len(sys.argv) == 2 and sys.argv[1] == '--discover':
        # Listing mode: print one dotted test id per line and exit.
        map_tests(print_test, tests)
    else:
        random.seed()
        # BUG FIX: sys.argv always contains at least the script name, so the
        # previous "len(sys.argv) == 0" check could never be true and
        # unittest.main() was unreachable.  "== 1" means "no extra arguments".
        if len(sys.argv) == 1:
            unittest.main()
        else:
            # Selective run: each CLI argument is a dotted test id produced
            # by --discover; look the tests up and run just those.
            lookup = {}
            map_tests(collect_tests(lookup), tests)
            suite = unittest.TestSuite()
            success = True
            for t in sys.argv[1:]:
                tc = lookup[tuple(t.split('.'))]
                suite.addTest(tc)
            result = unittest.TextTestRunner(verbosity=2).run(suite)
            if len(result.errors) > 0 or len(result.failures) > 0:
                success = False
            if success:
                sys.exit(0)
            else:
                sys.exit(1)
| 23.189655 | 90 | 0.579926 | import os
import re
import sys
import random
import unittest
test_str_re = re.compile(r'([^ ]+) \((.*)\)')
def test_id(test):
m = test_str_re.match(str(test))
if m is None:
return None
t, h = m.groups()
return tuple(h.split('.')+[t])
def map_tests(fun, test):
    """Apply *fun* to every individual test case, recursing into nested
    TestSuite containers."""
    if isinstance(test, unittest.suite.TestSuite):
        # BUG FIX: the original used map() for its side effect.  On Python 3
        # map() is lazy, so the recursion silently never ran; a plain loop is
        # identical on Python 2 and correct on Python 3.
        for sub_test in test:
            map_tests(fun, sub_test)
    else:
        fun(test)
def print_test(test):
    # Print one test as its dotted path (e.g. "pkg.module.Class.test_name"),
    # the same form the command line accepts for selective runs.
    # NOTE(review): Python 2 print statement (this file is Python 2).
    # test_id() returns None when str(test) does not match test_str_re,
    # which would make join() raise -- confirm inputs always match.
    print '.'.join(test_id(str(test)))
def collect_tests(dic):
    """Return a callback that stores each visited test in *dic*, keyed by
    the tuple produced by test_id()."""
    def fun(test):
        dic[test_id(test)] = test
    return fun
return fun
if __name__ == "__main__":
    # Discover every test module next to this file.
    tests = unittest.defaultTestLoader.discover(os.path.dirname(__file__), pattern='*.py')
    if len(sys.argv) == 2 and sys.argv[1] == '--discover':
        # Listing mode: print one dotted test id per line and exit.
        map_tests(print_test, tests)
    else:
        random.seed()
        # BUG FIX: sys.argv always contains at least the script name, so the
        # previous "len(sys.argv) == 0" check could never be true and
        # unittest.main() was unreachable.  "== 1" means "no extra arguments".
        if len(sys.argv) == 1:
            unittest.main()
        else:
            # Selective run: each CLI argument is a dotted test id produced
            # by --discover; look the tests up and run just those.
            lookup = {}
            map_tests(collect_tests(lookup), tests)
            suite = unittest.TestSuite()
            success = True
            for t in sys.argv[1:]:
                tc = lookup[tuple(t.split('.'))]
                suite.addTest(tc)
            result = unittest.TextTestRunner(verbosity=2).run(suite)
            if len(result.errors) > 0 or len(result.failures) > 0:
                success = False
            if success:
                sys.exit(0)
            else:
                sys.exit(1)
bab7df1647e393fd0bdd4649c9e0b2430d2831de | 2,584 | py | Python | gfapy/line/common/version_conversion.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 44 | 2017-03-18T08:08:04.000Z | 2021-11-10T16:11:15.000Z | gfapy/line/common/version_conversion.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 22 | 2017-04-04T21:20:31.000Z | 2022-03-09T19:05:30.000Z | gfapy/line/common/version_conversion.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 5 | 2017-07-07T02:56:56.000Z | 2020-09-30T20:10:49.000Z | import gfapy
try:
    from functools import partialmethod
except ImportError:
    # BUG FIX: this except clause contained only a comment (the fallback
    # definition was lost), which is a syntax error.  Restore the shim for
    # old Python versions lacking functools.partialmethod: bind the keyword
    # arguments and return a plain function usable as a method.
    def partialmethod(method, **kwargs):
        return lambda self: method(self, **kwargs)
# Install version-specific aliases on VersionConversion: for each supported
# GFA version "v" this creates to_v_s(), _to_v_a() and to_v() as
# partialmethod wrappers around the generic methods with version pre-bound.
for shall_version in ["gfa1", "gfa2"]:
    for name_template, generic_method in (
            ("to_{}_s", VersionConversion.to_version_s),
            ("_to_{}_a", VersionConversion._to_version_a),
            ("to_{}", VersionConversion.to_version)):
        setattr(VersionConversion, name_template.format(shall_version),
                partialmethod(generic_method, version=shall_version))
| 26.639175 | 80 | 0.628483 | import gfapy
try:
    from functools import partialmethod
except ImportError:
    # Compatibility shim for old Python versions that lack
    # functools.partialmethod: bind the keyword arguments and hand back a
    # plain function usable as a method.
    def partialmethod(method, **kwargs):
        def _bound(self):
            return method(self, **kwargs)
        return _bound
class VersionConversion:
    """Mixin providing helpers for converting a line between GFA
    specification versions ("gfa1"/"gfa2")."""

    @property
    def version(self):
        """
        Returns
        -------
        gfapy.VERSIONS, None
            GFA specification version
        """
        return self._version

    @property
    def dialect(self):
        """
        Returns
        -------
        gfapy.DIALECTS, None
            GFA dialect of the line
        """
        # DOC FIX: previously described as "GFA specification version",
        # copy-pasted from the `version` property above.
        return self._dialect

    def to_version_s(self, version):
        """
        Returns
        -------
        str
            A string representation of self.
        """
        return gfapy.Line.SEPARATOR.join(getattr(self, "_to_"+version+"_a")())

    def _to_version_a(self, version):
        """
        .. note::
          The default is an alias of to_list() if version is equal to the
          version of the line, and an empty list otherwise.
          gfapy.Line subclasses can redefine this method to convert
          between versions.

        Returns
        -------
        str list
            A list of string representations of the fields.
        """
        if version == self._version:
            return self.to_list()
        else:
            return []

    def to_version(self, version, raise_on_failure=True):
        """
        Returns
        -------
        gfapy.Line
            Conversion to the selected version.
        """
        if version == self._version:
            return self
        elif version not in gfapy.VERSIONS:
            raise gfapy.VersionError("Version unknown ({})".format(version))
        else:
            l = getattr(self, "_to_"+version+"_a")()
            if l:
                try:
                    converted = gfapy.Line(l, version=version, vlevel=self.vlevel)
                except Exception:
                    # Was a bare "except:"; narrowed to Exception so that
                    # KeyboardInterrupt/SystemExit are not swallowed here.
                    raise gfapy.RuntimeError("Conversion to {} failed\n".format(version)+
                                             "Line: {}".format(str(self)))
                return converted
            elif raise_on_failure:
                raise gfapy.VersionError("Records of type {} ".format(self.record_type)+
                                         "cannot be converted from version {} ".format(self._version)+
                                         "to version {}".format(version))
            else:
                return None
# Install version-specific aliases on VersionConversion: for each supported
# GFA version "v" this creates to_v_s(), _to_v_a() and to_v() as
# partialmethod wrappers around the generic to_version* methods with the
# version argument pre-bound.
for shall_version in ["gfa1", "gfa2"]:
    setattr(VersionConversion, "to_"+shall_version+"_s",
            partialmethod(VersionConversion.to_version_s,
                          version = shall_version))
    setattr(VersionConversion, "_to_"+shall_version+"_a",
            partialmethod(VersionConversion._to_version_a,
                          version = shall_version))
    setattr(VersionConversion, "to_"+shall_version,
            partialmethod(VersionConversion.to_version,
                          version = shall_version))
| 62 | 1,869 | 47 |
b216f08720abc4bf788695cdf4247b5f969a7fc9 | 2,795 | py | Python | run_eval.py | DataManagementLab/thesis-fl_client-side_validation | 0f6a35d08966133e6a8c13a110b9307d91f2d9cb | [
"MIT"
] | null | null | null | run_eval.py | DataManagementLab/thesis-fl_client-side_validation | 0f6a35d08966133e6a8c13a110b9307d91f2d9cb | [
"MIT"
] | null | null | null | run_eval.py | DataManagementLab/thesis-fl_client-side_validation | 0f6a35d08966133e6a8c13a110b9307d91f2d9cb | [
"MIT"
] | null | null | null | import argparse
from cliva_fl.utils import Logger, Plotter
from pathlib import Path
from datetime import datetime
# CLI: select one experiment (by index or directory name), optionally
# (re)generate metrics from the logs, then plot the requested metrics.
parser = argparse.ArgumentParser(description='Argument parser for log processing and plot creation')
parser.add_argument('-n', '--num', type=int, required=False, help='Index of previously executed experiment to select for processing. Last experiment equals --num 1.')
parser.add_argument('-d', '--dir', type=str, required=False, help='Name of experiment directory to select for processing.')
parser.add_argument('-l', '--log_dir', type=str, required=False, default='logs', help='Log directory to select for processing.')
parser.add_argument('-g', '--generate', action='store_true', required=False, help='Flag to indicate weather metrics should be (re)generated from logs.')
parser.add_argument('-c', '--clean', action='store_true', required=False, help='Flag to indicate a full clean-up of all generated files.')
parser.add_argument('-m', '--metrics', nargs='+', required=True, help='List of metrics to be computed. Available metrics are {}'.format(Plotter.METRICS))
parser.add_argument('--xmin', type=float, default=None, required=False, help='Minimum value of x-axes in plot.')
parser.add_argument('--xmax', type=float, default=None, required=False, help='Maximum value of x-axes in plot.')
parser.add_argument('--ymin', type=float, default=None, required=False, help='Minimum value of y-axes in plot.')
parser.add_argument('--ymax', type=float, default=None, required=False, help='Maximum value of y-axes in plot.')
args = parser.parse_args()
# BUG FIX: CLI validation now uses parser.error() instead of assert --
# asserts are stripped under "python -O" and produce tracebacks rather than
# a usage message.  Exactly one of --num / --dir must be supplied.
if not (args.num or args.dir):
    parser.error('You are required to specify num or dir parameter.')
if args.num and args.dir:
    parser.error('You can not use num and dir parameter simultaneously to select a log directory')
if args.num:
    # experiment_* directories sort lexicographically by timestamp, so
    # index -num counts back from the most recent experiment.
    exp_dirs = sorted(Path(args.log_dir).glob('experiment_*'))
    if args.num > len(exp_dirs):
        parser.error('Num can not be larger than the number of existing experiment directories')
    p = exp_dirs[-args.num].name
elif args.dir:
    p = Path(args.dir).name
# Directory name encodes the timestamp: the 2nd-4th underscore fields are
# Y/M/D, the 6th is "HH:MM" (the 1st and 5th fields are discarded).
_, YEAR, MONTH, DAY, _, TIME = p.split('_')
HOUR, MINUTE = TIME.split(':')
print(f'Experiment: {p}\nDate: {DAY}.{MONTH}.{YEAR}\tTime: {HOUR}:{MINUTE}')
timestamp = datetime(year=int(YEAR), month=int(MONTH), day=int(DAY), hour=int(HOUR), minute=int(MINUTE))
logger = Logger(base_path=args.log_dir, timestamp=timestamp)
plotter = Plotter(logger)
if args.clean:
    print('Cleaning up all generated files.')
    plotter.clear_metrics()
    plotter.clear_plots()
if args.generate:
    print('Generating metrics from logs.')
    plotter.clear_metrics()
    plotter.generate()
for metric in args.metrics:
    if metric not in Plotter.METRICS:
        parser.error(f'Metric {metric} is not a valid metric.')
plotter.plot_metric(metric, args.ymin, args.ymax, args.xmin, args.xmax) | 52.735849 | 166 | 0.734884 | import argparse
from cliva_fl.utils import Logger, Plotter
from pathlib import Path
from datetime import datetime
# CLI: select one experiment (by index or directory name), optionally
# (re)generate metrics from the logs, then plot the requested metrics.
parser = argparse.ArgumentParser(description='Argument parser for log processing and plot creation')
parser.add_argument('-n', '--num', type=int, required=False, help='Index of previously executed experiment to select for processing. Last experiment equals --num 1.')
parser.add_argument('-d', '--dir', type=str, required=False, help='Name of experiment directory to select for processing.')
parser.add_argument('-l', '--log_dir', type=str, required=False, default='logs', help='Log directory to select for processing.')
parser.add_argument('-g', '--generate', action='store_true', required=False, help='Flag to indicate weather metrics should be (re)generated from logs.')
parser.add_argument('-c', '--clean', action='store_true', required=False, help='Flag to indicate a full clean-up of all generated files.')
parser.add_argument('-m', '--metrics', nargs='+', required=True, help='List of metrics to be computed. Available metrics are {}'.format(Plotter.METRICS))
parser.add_argument('--xmin', type=float, default=None, required=False, help='Minimum value of x-axes in plot.')
parser.add_argument('--xmax', type=float, default=None, required=False, help='Maximum value of x-axes in plot.')
parser.add_argument('--ymin', type=float, default=None, required=False, help='Minimum value of y-axes in plot.')
parser.add_argument('--ymax', type=float, default=None, required=False, help='Maximum value of y-axes in plot.')
args = parser.parse_args()
# Exactly one of --num / --dir must be supplied.
# NOTE(review): these asserts disappear under "python -O" -- confirm that
# assert-based CLI validation is acceptable here.
assert args.num or args.dir, 'You are required to specify num or dir parameter.'
assert not (args.num and args.dir), 'You can not use num and dir parameter simultaneously to select a log directory'
if args.num:
    # experiment_* directories sort lexicographically, so index -num counts
    # back from the last (most recent) entry.
    exp_dirs = sorted(list(Path(args.log_dir).glob('experiment_*')))
    assert args.num <= len(exp_dirs), 'Num can not be larger than the number of existing experiment directories'
    p = exp_dirs[-args.num].name
elif args.dir:
    p = Path(args.dir).name
# The directory name encodes the timestamp: underscore fields 2-4 are
# Y/M/D and field 6 is "HH:MM" (fields 1 and 5 are discarded; field 5 is
# presumably a literal "at" -- confirm against the logger's naming scheme).
_, YEAR, MONTH, DAY, _, TIME = p.split('_')
HOUR, MINUTE = TIME.split(':')
print(f'Experiment: {p}\nDate: {DAY}.{MONTH}.{YEAR}\tTime: {HOUR}:{MINUTE}')
timestamp = datetime(year=int(YEAR), month=int(MONTH), day=int(DAY), hour=int(HOUR), minute=int(MINUTE))
logger = Logger(base_path=args.log_dir, timestamp=timestamp)
plotter = Plotter(logger)
if args.clean:
    print('Cleaning up all generated files.')
    plotter.clear_metrics()
    plotter.clear_plots()
if args.generate:
    print('Generating metrics from logs.')
    plotter.clear_metrics()
    plotter.generate()
for metric in args.metrics:
    assert metric in Plotter.METRICS, f'Metric {metric} is not a valid metric.'
    plotter.plot_metric(metric, args.ymin, args.ymax, args.xmin, args.xmax)
f40be914300d31fdb5b13fae195c20d13d17d517 | 2,391 | py | Python | tests/dashboards/security/base_case.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | tests/dashboards/security/base_case.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | tests/dashboards/security/base_case.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | 1 | 2021-07-09T16:29:50.000Z | 2021-07-09T16:29:50.000Z | from typing import List, Optional
from flask import escape, Response
from rabbitai.models.dashboard import Dashboard
from tests.dashboards.base_case import DashboardTestCase
| 37.952381 | 83 | 0.669594 | from typing import List, Optional
from flask import escape, Response
from rabbitai.models.dashboard import Dashboard
from tests.dashboards.base_case import DashboardTestCase
class BaseTestDashboardSecurity(DashboardTestCase):
    """Shared assertion helpers for dashboard security test cases."""

    def tearDown(self) -> None:
        self.clean_created_objects()

    def assert_dashboard_api_response(
        self, response: Response, dashboard_to_access: Dashboard
    ) -> None:
        self.assert200(response)
        assert response.json["id"] == dashboard_to_access.id

    def assert_dashboards_list_view_response(
        self,
        response: Response,
        expected_counts: int,
        expected_dashboards: Optional[List[Dashboard]] = None,
        not_expected_dashboards: Optional[List[Dashboard]] = None,
    ) -> None:
        self.assert200(response)
        html = response.data.decode("utf-8")
        if expected_counts == 0:
            assert "No records found" in html
        else:
            # The list view embeds the number of dashboards in the rendered
            # HTML as "Record Count:</strong> N".
            assert (
                "Record Count:</strong> {count}".format(count=str(expected_counts))
                in html
            )
        for dashboard in expected_dashboards or []:
            assert dashboard.url in html
        for dashboard in not_expected_dashboards or []:
            assert dashboard.url not in html

    def assert_dashboards_api_response(
        self,
        response: Response,
        expected_counts: int,
        expected_dashboards: Optional[List[Dashboard]] = None,
        not_expected_dashboards: Optional[List[Dashboard]] = None,
    ) -> None:
        self.assert200(response)
        payload = response.json
        assert payload["count"] == expected_counts
        listed_urls = {dash["url"] for dash in payload["result"]}
        for dashboard in expected_dashboards or []:
            assert dashboard.url in listed_urls
        for dashboard in not_expected_dashboards or []:
            assert dashboard.url not in listed_urls
| 2,054 | 30 | 130 |
514bacf6182bd9fb1311eb155f89189b33e7f933 | 3,279 | py | Python | tmslack/slack.py | arodioukov/tmslack | 65ffc864b6a872b732ae56e9be82ddba31c3fc12 | [
"Apache-2.0"
] | 1 | 2019-01-22T17:36:04.000Z | 2019-01-22T17:36:04.000Z | tmslack/slack.py | arodioukov/tmslack | 65ffc864b6a872b732ae56e9be82ddba31c3fc12 | [
"Apache-2.0"
] | null | null | null | tmslack/slack.py | arodioukov/tmslack | 65ffc864b6a872b732ae56e9be82ddba31c3fc12 | [
"Apache-2.0"
] | 3 | 2019-01-22T17:37:08.000Z | 2019-02-16T18:12:38.000Z | """A tmslack-specific wrapper for the slack client."""
from pathlib import Path
from typing import Sequence, NoReturn
from slackclient import SlackClient
from tmslack.cache import Cache
from tmslack.config import Config
class ClientException(Exception):
    """Exception type for errors raised by the tmslack slack client
    (failed API calls and unsatisfiable lookups)."""
class Client:
    """A slack client that can be used to send messages.

    Attributes:
        info: Generic information about the client
    """

    @property
    def info(self):
        """Returns general information about the client.

        The returned map will have the url of the team, the team name, the bot name, the bot user
        identifier, and the team identifier.
        """
        return self._info

    def lookup_user_id(self, username) -> str:
        """Looks up a user identifier in the team by the user's name or real name.

        The result is cached per username in self._user_cache.
        """
        # NOTE(review): do_lookup_id is not defined anywhere in this copy of
        # the file -- presumably a nested helper was lost; confirm against
        # the full source before relying on this method.
        return self._user_cache.get_through(username, do_lookup_id)

    def lookup_conversation_id(self, user_ids: Sequence[str]) -> str:
        """Given a sequence of user names, get the identifier of the conversation between all those
        users."""
        result = self._slack.api_call('conversations.open', users=list(user_ids))
        if not result['ok']:
            raise ClientException(f'Failed to retrieve get conversation: {result["error"]}')
        return result['channel']['id']

    def send_message(self, channel_id: str, message: str) -> NoReturn:
        """Sends the given message to the given channel.

        Raises ClientException if Slack reports the post failed.
        """
        result = self._slack.api_call('chat.postMessage',
                                      channel=channel_id,
                                      as_user=True,
                                      text=message)
        if not result['ok']:
            raise ClientException(f'Failed to post message: {result["error"]}')
| 39.035714 | 99 | 0.596523 | """A tmslack-specific wrapper for the slack client."""
from pathlib import Path
from typing import Sequence, NoReturn
from slackclient import SlackClient
from tmslack.cache import Cache
from tmslack.config import Config
class ClientException(Exception):
    """Exception type for errors raised by the tmslack slack client
    (failed API calls and unsatisfiable lookups)."""
class Client:
    """A slack client that can be used to send messages.

    Attributes:
        info: Generic information about the client
    """

    def __init__(self, configuration: Config, cache_dir):
        token = configuration.token
        self._slack = SlackClient(token)
        # Team info is cached on disk under <cache_dir>/teams, keyed by the
        # API token; lookup_team only runs on a cache miss.
        teams_cache = Cache(Path(cache_dir, 'teams'))

        def lookup_team(_):
            result = self._slack.api_call('auth.test')
            if not result['ok']:
                raise ClientException(f'Failed to retrieve team information: {result["error"]}')
            return {k: result[k] for k in ['url', 'team', 'user', 'team_id', 'user_id']}

        self._info = teams_cache.get_through(token, lookup_team)
        # Per-team user-id cache, so renaming files cannot collide between teams.
        self._user_cache = Cache(Path(cache_dir, f"{self._info['team_id']}_users"))

    @property
    def info(self):
        """Returns general information about the client.

        The returned map will have the url of the team, the team name, the bot name, the bot user
        identifier, and the team identifier.
        """
        return self._info

    def lookup_user_id(self, username) -> str:
        """Looks up a user identifier in the team by the user's name or real name.

        The id is cached per username; the Slack user list is only paged
        through on a cache miss.
        """
        def list_users(cursor=None):
            # Generator over all team members, following Slack's cursor
            # pagination until next_cursor comes back empty.
            while True:
                result = self._slack.api_call('users.list', cursor=cursor)
                del result['headers']
                if not result['ok']:
                    # NOTE(review): inconsistent with the rest of the class,
                    # which raises ClientException on API failure -- confirm
                    # whether IOError here is intentional.
                    raise IOError(f'Failed to list users: {result["error"]}')
                for member in result['members']:
                    yield member
                del result['members']
                cursor = result['response_metadata']['next_cursor']
                if cursor == "":
                    break

        def do_lookup_id(_):
            for user in list_users():
                if username == user.get('name') or username == user.get('real_name'):
                    return user['id']
            raise ClientException(f'The user {username} could not be found.')

        return self._user_cache.get_through(username, do_lookup_id)

    def lookup_conversation_id(self, user_ids: Sequence[str]) -> str:
        """Given a sequence of user names, get the identifier of the conversation between all those
        users."""
        result = self._slack.api_call('conversations.open', users=list(user_ids))
        if not result['ok']:
            raise ClientException(f'Failed to retrieve get conversation: {result["error"]}')
        return result['channel']['id']

    def send_message(self, channel_id: str, message: str) -> NoReturn:
        """Sends the given message to the given channel.

        Raises ClientException if Slack reports the post failed.
        """
        result = self._slack.api_call('chat.postMessage',
                                      channel=channel_id,
                                      as_user=True,
                                      text=message)
        if not result['ok']:
            raise ClientException(f'Failed to post message: {result["error"]}')
| 1,361 | 0 | 87 |
c05b33f448fe7de1d1b7f01c3cc36f49caa3912c | 70 | py | Python | python/cudf/cudf/api/__init__.py | ssayyah/cudf | b5ffdf5ffb398adef25435bc696df3ff30c4aa9a | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/api/__init__.py | ssayyah/cudf | b5ffdf5ffb398adef25435bc696df3ff30c4aa9a | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/api/__init__.py | ssayyah/cudf | b5ffdf5ffb398adef25435bc696df3ff30c4aa9a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION.
from cudf.api import types
| 17.5 | 41 | 0.757143 | # Copyright (c) 2020, NVIDIA CORPORATION.
from cudf.api import types
| 0 | 0 | 0 |
99a25c4e0efc56a60bb968bffb7bc4d4fc0ffdeb | 11,015 | py | Python | openmdao/components/subproblem.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | [
"Apache-2.0"
] | 17 | 2018-01-11T20:13:59.000Z | 2022-03-22T03:46:05.000Z | openmdao/components/subproblem.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | [
"Apache-2.0"
] | 6 | 2017-10-19T23:14:14.000Z | 2020-11-22T17:30:57.000Z | openmdao/components/subproblem.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | [
"Apache-2.0"
] | 10 | 2018-04-12T22:13:33.000Z | 2020-05-07T10:02:59.000Z |
from __future__ import print_function
import sys
import os
from collections import OrderedDict
from itertools import chain
import numpy
from six import iteritems, itervalues, reraise
from openmdao.core.component import Component
from openmdao.util.dict_util import _jac_to_flat_dict
from openmdao.core.mpi_wrap import MPI
def _reraise(pathname, exc):
    """
    Re-raise the exception described by the sys.exc_info() triple *exc*,
    with "In subproblem '<pathname>': " prefixed to its message.  This
    avoids having to tag every system and variable inside the sub-Problem
    with the wrapper's pathname.
    """
    exc_type, exc_value, traceback = exc
    message = "In subproblem '%s': %s" % (pathname, str(exc_value))
    reraise(exc_type, exc_type(message), traceback)
class SubProblem(Component):
"""A Component that wraps a sub-Problem.
Args
----
problem : Problem
The Problem to be wrapped by this component.
params : iter of str
Names of variables that are to be visible as parameters to
this component. Note that these are allowed to be unknowns in
the sub-problem.
unknowns : iter of str
Names of variables that are to be visible as unknowns in this
component.
"""
def check_setup(self, out_stream=sys.stdout):
"""Write a report to the given stream indicating any potential problems
found with the current configuration of this ``System``.
Args
----
out_stream : a file-like object, optional
Stream where report will be written.
"""
try:
self._problem.check_setup(out_stream)
except:
_reraise(self.pathname, sys.exc_info())
def cleanup(self):
""" Clean up resources prior to exit. """
try:
self._problem.cleanup()
except:
_reraise(self.pathname, sys.exc_info())
def get_req_procs(self):
"""
Returns
-------
tuple
A tuple of the form (min_procs, max_procs), indicating the min and max
processors usable by this `System`.
"""
# because this is called before self._problem.setup, we need to go
# ahead and set the problem's driver's root explicitly here.
self._problem.driver.root = self._problem.root
try:
return self._problem.get_req_procs()
except:
_reraise(self.pathname, sys.exc_info())
def _get_relname_map(self, parent_proms):
"""
Args
----
parent_proms : `dict`
A dict mapping absolute names to promoted names in the parent
system.
Returns
-------
dict
Maps promoted name in parent (owner of unknowns) to
the corresponding promoted name in the child.
"""
# use an ordered dict here so we can use this smaller dict when looping
# during get_view.
# (the order of this one matches the order in the parent)
umap = OrderedDict()
for key in self._prob_unknowns:
pkey = '.'.join((self.name, key))
if pkey in parent_proms:
umap[parent_proms[pkey]] = key
return umap
def _setup_communicators(self, comm, parent_dir):
"""
Assign communicator to this `System` and run full setup on its
subproblem.
Args
----
comm : an MPI communicator (real or fake)
The communicator being offered by the parent system.
parent_dir : str
The absolute directory of the parent, or '' if unspecified. Used to
determine the absolute directory of all subsystems.
"""
self._problem.comm = comm
# do full setup on our subproblem now that we have what we need
# check_setup will be called later if specified from the top level
# Problem so always set check=False here.
try:
self._problem.setup(check=False)
except:
_reraise(self.pathname, sys.exc_info())
super(SubProblem, self)._setup_communicators(comm, parent_dir)
self._problem.pathname = self.pathname
self._problem._parent_dir = self._sysdata.absdir
for p in self._prob_params:
if not (p in self._problem._dangling or p in self._problem.root.unknowns):
raise RuntimeError("Param '%s' cannot be set. Either it will "
"be overwritten by a connected output or it "
"doesn't exist." % p)
def _setup_variables(self):
"""
Returns copies of our params and unknowns dictionaries,
re-keyed to use absolute variable names.
"""
to_prom_name = self._sysdata.to_prom_name = {}
to_abs_uname = self._sysdata.to_abs_uname = {}
to_abs_pnames = self._sysdata.to_abs_pnames = OrderedDict()
to_prom_uname = self._sysdata.to_prom_uname = OrderedDict()
to_prom_pname = self._sysdata.to_prom_pname = OrderedDict()
# Our subproblem has been completely set up. We now just pull
# variable metadata from our subproblem
subparams = self._problem.root.params
subunknowns = self._problem.root.unknowns
# keep track of params that are actually unknowns in the subproblem
self._unknowns_as_params = []
self._params_dict = self._init_params_dict = OrderedDict()
for name in self._prob_params:
pathname = self._get_var_pathname(name)
if name in subparams:
meta = subparams._dat[name].meta
elif name in self._problem._dangling:
meta = self._rec_get_param_meta(name)
else:
meta = subunknowns._dat[name].meta
if not meta.get('_canset_'):
raise TypeError("SubProblem param '%s' is mapped to the output of an internal component."
" This is illegal because a value set into the param will be overwritten"
" by the internal component." % name)
self._unknowns_as_params.append(name)
meta = meta.copy() # don't mess with subproblem's metadata!
self._params_dict[pathname] = meta
meta['pathname'] = pathname
del meta['top_promoted_name']
to_prom_pname[pathname] = name
to_abs_pnames[name] = (pathname,)
self._unknowns_dict = self._init_unknowns_dict = OrderedDict()
# if we have params that are really unknowns in the subproblem, we
# also add them as unknowns so we can take derivatives
for name in self._prob_unknowns:
pathname = self._get_var_pathname(name)
meta = subunknowns._dat[name].meta.copy()
self._unknowns_dict[pathname] = meta
meta['pathname'] = pathname
del meta['top_promoted_name']
to_prom_uname[pathname] = name
to_abs_uname[name] = pathname
to_prom_name.update(to_prom_uname)
to_prom_name.update(to_prom_pname)
self._post_setup_vars = True
self._sysdata._params_dict = self._params_dict
self._sysdata._unknowns_dict = self._unknowns_dict
return self._params_dict, self._unknowns_dict
def solve_nonlinear(self, params, unknowns, resids):
"""Sets params into the sub-problem, runs the
sub-problem, and updates our unknowns with values
from the sub-problem.
Args
----
params : `VecWrapper`
`VecWrapper` containing parameters. (p)
unknowns : `VecWrapper`
`VecWrapper` containing outputs and states. (u)
resids : `VecWrapper`
`VecWrapper` containing residuals. (r)
"""
if not self.is_active():
return
try:
# set params into the subproblem
prob = self._problem
for name in self._prob_params:
prob[name] = params[name]
self._problem.run()
# update our unknowns from subproblem
for name in self._sysdata.to_abs_uname:
unknowns[name] = prob.root.unknowns[name]
resids[name] = prob.root.resids[name]
# if params are really unknowns, they may have changed, so update
for name in self._unknowns_as_params:
params[name] = prob.root.unknowns[name]
except:
_reraise(self.pathname, sys.exc_info())
def linearize(self, params, unknowns, resids):
    """Returns Jacobian. J is a dictionary whose keys are tuples
    of the form ('unknown', 'param') and whose values are ndarrays.

    Args
    ----
    params : `VecWrapper`
        `VecWrapper` containing parameters. (p)

    unknowns : `VecWrapper`
        `VecWrapper` containing outputs and states. (u)

    resids : `VecWrapper`
        `VecWrapper` containing residuals. (r)

    Returns
    -------
    dict
        Dictionary whose keys are tuples of the form ('unknown', 'param')
        and whose values are ndarrays.
    """
    try:
        prob = self._problem

        # set params into the subproblem
        for name in self._prob_params:
            prob[name] = params[name]

        # have to convert the jacobian returned from calc_gradient from a
        # nested dict to a flat dict with tuple keys.
        return _jac_to_flat_dict(prob.calc_gradient(self.params.keys(),
                                                    self.unknowns.keys(),
                                                    return_format='dict'))
    except Exception:
        # was a bare `except:`; see note in solve_nonlinear
        _reraise(self.pathname, sys.exc_info())
| 34.421875 | 109 | 0.594462 |
from __future__ import print_function
import sys
import os
from collections import OrderedDict
from itertools import chain
import numpy
from six import iteritems, itervalues, reraise
from openmdao.core.component import Component
from openmdao.util.dict_util import _jac_to_flat_dict
from openmdao.core.mpi_wrap import MPI
def _reraise(pathname, exc):
    """
    Rather than adding the sub-Problem pathname to every system and variable
    in the sub-Problem (and causing more complication w.r.t. promoted names),
    just put a try block around all of the calls to the sub-Problem and
    preface any exception messages with "In subproblem 'x' ..."
    """
    err_type, err_value, err_traceback = exc
    prefixed = err_type("In subproblem '%s': %s" % (pathname, str(err_value)))
    reraise(err_type, prefixed, err_traceback)
class SubProblem(Component):
    """A Component that wraps a sub-Problem.

    Args
    ----
    problem : Problem
        The Problem to be wrapped by this component.

    params : iter of str
        Names of variables that are to be visible as parameters to
        this component. Note that these are allowed to be unknowns in
        the sub-problem.

    unknowns : iter of str
        Names of variables that are to be visible as unknowns in this
        component.
    """

    def __init__(self, problem, params=(), unknowns=()):
        super(SubProblem, self).__init__()
        self._problem = problem
        self._prob_params = list(params)
        self._prob_unknowns = list(unknowns)

    def check_setup(self, out_stream=sys.stdout):
        """Write a report to the given stream indicating any potential
        problems found with the current configuration of this ``System``.

        Args
        ----
        out_stream : a file-like object, optional
            Stream where report will be written.
        """
        try:
            self._problem.check_setup(out_stream)
        except Exception:
            # was a bare `except:`: Exception keeps KeyboardInterrupt and
            # SystemExit from being wrapped into a sub-problem error
            _reraise(self.pathname, sys.exc_info())

    def cleanup(self):
        """ Clean up resources prior to exit. """
        try:
            self._problem.cleanup()
        except Exception:
            _reraise(self.pathname, sys.exc_info())

    def get_req_procs(self):
        """
        Returns
        -------
        tuple
            A tuple of the form (min_procs, max_procs), indicating the min
            and max processors usable by this `System`.
        """
        # because this is called before self._problem.setup, we need to go
        # ahead and set the problem's driver's root explicitly here.
        self._problem.driver.root = self._problem.root
        try:
            return self._problem.get_req_procs()
        except Exception:
            _reraise(self.pathname, sys.exc_info())

    def _get_relname_map(self, parent_proms):
        """
        Args
        ----
        parent_proms : `dict`
            A dict mapping absolute names to promoted names in the parent
            system.

        Returns
        -------
        dict
            Maps promoted name in parent (owner of unknowns) to
            the corresponding promoted name in the child.
        """
        # use an ordered dict here so we can use this smaller dict when
        # looping during get_view.
        # (the order of this one matches the order in the parent)
        umap = OrderedDict()
        for key in self._prob_unknowns:
            pkey = '.'.join((self.name, key))
            if pkey in parent_proms:
                umap[parent_proms[pkey]] = key
        return umap

    def _rec_get_param(self, name):
        # plain read from our own params VecWrapper
        return self.params[name]

    def _rec_get_param_meta(self, name):
        # metadata lookup is delegated to the subproblem's root system
        try:
            return self._problem.root._rec_get_param_meta(name)
        except Exception:
            _reraise(self.pathname, sys.exc_info())

    def _rec_set_param(self, name, value):
        self.params[name] = value

    def _setup_communicators(self, comm, parent_dir):
        """
        Assign communicator to this `System` and run full setup on its
        subproblem.

        Args
        ----
        comm : an MPI communicator (real or fake)
            The communicator being offered by the parent system.

        parent_dir : str
            The absolute directory of the parent, or '' if unspecified.
            Used to determine the absolute directory of all subsystems.
        """
        self._problem.comm = comm

        # do full setup on our subproblem now that we have what we need.
        # check_setup will be called later if specified from the top level
        # Problem, so always set check=False here.
        try:
            self._problem.setup(check=False)
        except Exception:
            _reraise(self.pathname, sys.exc_info())

        super(SubProblem, self)._setup_communicators(comm, parent_dir)
        self._problem.pathname = self.pathname
        self._problem._parent_dir = self._sysdata.absdir

        for p in self._prob_params:
            if not (p in self._problem._dangling or
                    p in self._problem.root.unknowns):
                raise RuntimeError("Param '%s' cannot be set. Either it will "
                                   "be overwritten by a connected output or it "
                                   "doesn't exist." % p)

    def _setup_variables(self):
        """
        Returns copies of our params and unknowns dictionaries,
        re-keyed to use absolute variable names.
        """
        to_prom_name = self._sysdata.to_prom_name = {}
        to_abs_uname = self._sysdata.to_abs_uname = {}
        to_abs_pnames = self._sysdata.to_abs_pnames = OrderedDict()
        to_prom_uname = self._sysdata.to_prom_uname = OrderedDict()
        to_prom_pname = self._sysdata.to_prom_pname = OrderedDict()

        # Our subproblem has been completely set up. We now just pull
        # variable metadata from our subproblem
        subparams = self._problem.root.params
        subunknowns = self._problem.root.unknowns

        # keep track of params that are actually unknowns in the subproblem
        self._unknowns_as_params = []

        self._params_dict = self._init_params_dict = OrderedDict()
        for name in self._prob_params:
            pathname = self._get_var_pathname(name)
            if name in subparams:
                meta = subparams._dat[name].meta
            elif name in self._problem._dangling:
                meta = self._rec_get_param_meta(name)
            else:
                meta = subunknowns._dat[name].meta
                if not meta.get('_canset_'):
                    raise TypeError("SubProblem param '%s' is mapped to the output of an internal component."
                                    " This is illegal because a value set into the param will be overwritten"
                                    " by the internal component." % name)
                self._unknowns_as_params.append(name)

            meta = meta.copy()  # don't mess with subproblem's metadata!
            self._params_dict[pathname] = meta
            meta['pathname'] = pathname
            del meta['top_promoted_name']
            to_prom_pname[pathname] = name
            to_abs_pnames[name] = (pathname,)

        self._unknowns_dict = self._init_unknowns_dict = OrderedDict()
        # if we have params that are really unknowns in the subproblem, we
        # also add them as unknowns so we can take derivatives
        for name in self._prob_unknowns:
            pathname = self._get_var_pathname(name)
            meta = subunknowns._dat[name].meta.copy()
            self._unknowns_dict[pathname] = meta
            meta['pathname'] = pathname
            del meta['top_promoted_name']
            to_prom_uname[pathname] = name
            to_abs_uname[name] = pathname

        to_prom_name.update(to_prom_uname)
        to_prom_name.update(to_prom_pname)

        self._post_setup_vars = True
        self._sysdata._params_dict = self._params_dict
        self._sysdata._unknowns_dict = self._unknowns_dict

        return self._params_dict, self._unknowns_dict

    def solve_nonlinear(self, params, unknowns, resids):
        """Sets params into the sub-problem, runs the
        sub-problem, and updates our unknowns with values
        from the sub-problem.

        Args
        ----
        params : `VecWrapper`
            `VecWrapper` containing parameters. (p)

        unknowns : `VecWrapper`
            `VecWrapper` containing outputs and states. (u)

        resids : `VecWrapper`
            `VecWrapper` containing residuals. (r)
        """
        if not self.is_active():
            return
        try:
            # set params into the subproblem
            prob = self._problem
            for name in self._prob_params:
                prob[name] = params[name]

            self._problem.run()

            # update our unknowns from subproblem
            for name in self._sysdata.to_abs_uname:
                unknowns[name] = prob.root.unknowns[name]
                resids[name] = prob.root.resids[name]

            # if params are really unknowns, they may have changed, so update
            for name in self._unknowns_as_params:
                params[name] = prob.root.unknowns[name]
        except Exception:
            _reraise(self.pathname, sys.exc_info())

    def linearize(self, params, unknowns, resids):
        """
        Returns Jacobian. J is a dictionary whose keys are tuples
        of the form ('unknown', 'param') and whose values are ndarrays.

        Args
        ----
        params : `VecWrapper`
            `VecWrapper` containing parameters. (p)

        unknowns : `VecWrapper`
            `VecWrapper` containing outputs and states. (u)

        resids : `VecWrapper`
            `VecWrapper` containing residuals. (r)

        Returns
        -------
        dict
            Dictionary whose keys are tuples of the form ('unknown', 'param')
            and whose values are ndarrays.
        """
        try:
            prob = self._problem

            # set params into the subproblem
            for name in self._prob_params:
                prob[name] = params[name]

            # have to convert jacobian returned from calc_gradient from a
            # nested dict to a flat dict with tuple keys.
            return _jac_to_flat_dict(prob.calc_gradient(self.params.keys(),
                                                        self.unknowns.keys(),
                                                        return_format='dict'))
        except Exception:
            _reraise(self.pathname, sys.exc_info())

    def add_param(self, name, **kwargs):
        raise NotImplementedError("Can't add '%s' to SubProblem. "
                                  "add_param is not supported." % name)

    def add_output(self, name, **kwargs):
        raise NotImplementedError("Can't add '%s' to SubProblem. "
                                  "add_output is not supported." % name)

    def add_state(self, name, **kwargs):
        raise NotImplementedError("Can't add '%s' to SubProblem. "
                                  "add_state is not supported." % name)
| 911 | 0 | 189 |
058a4cd65ed4296366b25981a9b2ed20934bd738 | 2,056 | py | Python | reachy_pyluos_hal/joint.py | pollen-robotics/reachy_pyluos_hal | dcd2ddc937c60170907f258994590fc9ecfe5d7f | [
"Apache-2.0"
] | null | null | null | reachy_pyluos_hal/joint.py | pollen-robotics/reachy_pyluos_hal | dcd2ddc937c60170907f258994590fc9ecfe5d7f | [
"Apache-2.0"
] | 2 | 2021-04-07T15:34:45.000Z | 2022-01-26T09:03:44.000Z | reachy_pyluos_hal/joint.py | pollen-robotics/reachy_pyluos_hal | dcd2ddc937c60170907f258994590fc9ecfe5d7f | [
"Apache-2.0"
] | null | null | null | """Joint abstraction.
It can be any DynamixelMotor or an OrbitaMotor.
"""
from abc import ABC
from logging import Logger
from typing import Callable, Dict, Optional, Tuple
from .register import Register
class Joint(ABC):
    """Joint abstraction.

    Should define the following registers:
        'torque_enable'
        'goal_position'
        'moving_speed'
        'torque_limit'
        'present_position'
        'temperature'
    """

    def __init__(self, register_config: Dict[str,
                                             Tuple[
                                                 Callable[[bytes], float],
                                                 Callable[[float], bytes]
                                             ]]
                 ) -> None:
        """Set up internal registers."""
        self.registers = {}
        for reg_name, (usi_converter, raw_converter) in register_config.items():
            self.registers[reg_name] = Register(usi_converter, raw_converter)
        self.logger: Optional[Logger] = None

    def is_value_set(self, register: str) -> bool:
        """Check whether the register has been set since the last reset."""
        return self.registers[register].is_set()

    def clear_value(self, register: str):
        """Mark the register value as obsolete."""
        self.registers[register].reset()

    def get_value(self, register: str) -> bytes:
        """Return the up-to-date raw value of the register."""
        return self.registers[register].get()

    def get_value_as_usi(self, register: str) -> float:
        """Return the up-to-date USI value of the register."""
        return self.registers[register].get_as_usi()

    def update_value(self, register: str, val: bytes):
        """Store a raw value received from a gate into the register."""
        self.registers[register].update(val)

    def update_value_using_usi(self, register: str, val: float):
        """Store a USI value received from a gate into the register."""
        self.registers[register].update_using_usi(val)
| 33.16129 | 84 | 0.589008 | """Joint abstraction.
It can be any DynamixelMotor or an OrbitaMotor.
"""
from abc import ABC
from logging import Logger
from typing import Callable, Dict, Optional, Tuple
from .register import Register
class Joint(ABC):
    """Joint abstraction.

    Should define the following registers:
        'torque_enable'
        'goal_position'
        'moving_speed'
        'torque_limit'
        'present_position'
        'temperature'
    """

    def __init__(self, register_config: Dict[str,
                                             Tuple[
                                                 Callable[[bytes], float],
                                                 Callable[[float], bytes]
                                             ]]
                 ) -> None:
        """Set up internal registers."""
        self.registers = {
            name: Register(to_usi, to_raw)
            for name, (to_usi, to_raw) in register_config.items()
        }
        self.logger: Optional[Logger] = None

    def is_value_set(self, register: str) -> bool:
        """Check if the register has been set since last reset."""
        return self.registers[register].is_set()

    def clear_value(self, register: str):
        """Clear the specified value, making it obsolete."""
        self.registers[register].reset()

    def get_value(self, register: str) -> bytes:
        """Get the up-to-date raw value of the given register."""
        return self.registers[register].get()

    def get_value_as_usi(self, register: str) -> float:
        """Get the up-to-date USI value of the given register."""
        return self.registers[register].get_as_usi()

    def update_value(self, register: str, val: bytes):
        """Update the register with the raw value received from a gate."""
        self.registers[register].update(val)

    def update_value_using_usi(self, register: str, val: float):
        """Update the register with the USI value received from a gate."""
        self.registers[register].update_using_usi(val)
| 0 | 0 | 0 |
cd44b8b799c36096d1eb83a3415c97f081905bc6 | 2,116 | py | Python | prototypes/usb_audio.py | Exus1/alfa-blue-me | 4b2f9f549967b44688e753a64b0578ebbfedf430 | [
"MIT"
] | null | null | null | prototypes/usb_audio.py | Exus1/alfa-blue-me | 4b2f9f549967b44688e753a64b0578ebbfedf430 | [
"MIT"
] | 1 | 2020-07-06T14:36:18.000Z | 2021-01-27T09:13:12.000Z | prototypes/usb_audio.py | Exus1/alfa-blue-me | 4b2f9f549967b44688e753a64b0578ebbfedf430 | [
"MIT"
] | null | null | null | import can
import asyncio
import time
can0 = can.ThreadSafeBus(channel = 'can0', bustype = 'socketcan_ctypes')
loop = asyncio.get_event_loop()
ready = False
przelaczone = False
counter = 0;
can.Notifier(can0, [
on_message
], loop=loop)
loop.call_soon(send_status)
loop.run_forever()
| 37.122807 | 151 | 0.655955 | import can
import asyncio
import time
# Thread-safe CAN bus handle on the socketcan interface 'can0'
can0 = can.ThreadSafeBus(channel = 'can0', bustype = 'socketcan_ctypes')
loop = asyncio.get_event_loop()
# ready: set True by on_message once the trigger frame has been handled
ready = False
# przelaczone (Polish: "switched"): guards against handling the trigger twice
przelaczone = False
def on_message(msg: can.Message):
    """Handle incoming CAN frames: on the first 0x405 trigger frame,
    send the title frame once and flag the switch as done."""
    global przelaczone
    global ready
    trigger_payload = bytearray([0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
    if msg.arbitration_id == 0x405 and msg.data == trigger_payload and not przelaczone:
        # 747#21 14 00 28 65 70
        # proxiMsg = can.Message(arbitration_id=0x545, data=[0xE0, 0x00, 0x00, 0x00, 0x00, 0x00], extended_id=False)
        # can0.send(proxiMsg)
        title = can.Message(arbitration_id=0x5E7,
                            data=[0x00, 0x2A, 0x30, 0xdf, 0xce, 0x3c, 0x00, 0x00],
                            extended_id=False)
        can0.send(title)
        ready = True
        print('przelacza')
        przelaczone = True


# counter read and advanced by send_status (track-time value)
counter = 0
def send_status():
    """Periodically broadcast status frames on the bus.

    Always sends the track frame (0x427) and a status frame (0x3E7);
    once `ready` is set it also sends the 0x545 status frame and
    advances the counter. Reschedules itself every 0.95 s.
    """
    global ready
    global counter
    if not ready:
        trackMsg = can.Message(arbitration_id=0x427,
                               data=[0x00, 0x00, 0xC8, 0x78, 0x00, 0x00, 0x00, 0x00],
                               extended_id=False)
        statusMsg2 = can.Message(arbitration_id=0x3E7,
                                 data=[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80],
                                 extended_id=False)
    else:
        statusMsg = can.Message(arbitration_id=0x545,
                                data=[0x58, 0x04, 0x0C, 0x00, 0x02, 0x00],
                                extended_id=False)
        can0.send(statusMsg)
        # BCD-encode the counter (e.g. 12 -> 0x12).  The original built
        # int('0x0' + str(counter), 16), which exceeds a CAN data byte
        # (0-255) once counter reaches 100 -- wrap at 100 to stay valid.
        bcd_counter = int(str(counter % 100), 16)
        trackMsg = can.Message(arbitration_id=0x427,
                               data=[0x00, bcd_counter, 0x40, 0x78, 0x00, 0x00, 0x00, 0x00],
                               extended_id=False)
        statusMsg2 = can.Message(arbitration_id=0x3E7,
                                 data=[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84],
                                 extended_id=False)
        counter += 1
    # can0.send(statusMsg)
    can0.send(statusMsg2)
    can0.send(trackMsg)
    loop.call_later(0.95, send_status)
# Dispatch incoming CAN frames to on_message on the asyncio loop
can.Notifier(can0, [
on_message
], loop=loop)
# Start the periodic status broadcaster, then run the loop forever
loop.call_soon(send_status)
loop.run_forever()
| 1,777 | 0 | 44 |
82b3c088b0b897efda02d574bdd89eb14aca259f | 832 | py | Python | src/main.py | Ross-Morgan/Video-Project-Building-Tutorial | 802d97ecff838bbb6da127797215fb64177f3876 | [
"BSD-3-Clause"
] | null | null | null | src/main.py | Ross-Morgan/Video-Project-Building-Tutorial | 802d97ecff838bbb6da127797215fb64177f3876 | [
"BSD-3-Clause"
] | null | null | null | src/main.py | Ross-Morgan/Video-Project-Building-Tutorial | 802d97ecff838bbb6da127797215fb64177f3876 | [
"BSD-3-Clause"
] | null | null | null | import sys
from PyQt6 import QtCore, QtGui, QtWidgets
if __name__ == "__main__":
main()
| 20.8 | 73 | 0.627404 | import sys
from PyQt6 import QtCore, QtGui, QtWidgets
class MainWindow(QtWidgets.QMainWindow):
    """Main application window: applies size, title and icon, then
    installs shortcuts and UI elements."""

    def __init__(self, size: QtCore.QSize, title: str, icon: QtGui.QIcon,
                 parent: QtWidgets.QWidget = None):
        super().__init__(parent=parent)
        # Window chrome
        self.resize(size)
        self.setWindowTitle(title)
        self.setWindowIcon(icon)
        # Window content
        self.setup_shortcuts()
        self.setup_ui()

    def setup_shortcuts(self):
        """Add shortcuts to the program"""

    def setup_ui(self):
        """Add graphical elements to the program"""
def main():
    """Create the Qt application, show the main window, run the loop."""
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow(QtCore.QSize(1920, 1080), "Test Window", QtGui.QIcon(""))
    window.show()
    app.exec()


if __name__ == "__main__":
    main()
| 492 | 197 | 46 |
a014ec256acc9103f16604bce4f6d51a7778de63 | 2,279 | py | Python | SnakeBackend/main/utils/GameField.py | PatrickKoss/Snake | b15ba77d7b848db48206c14d8e16f10ebcc2e808 | [
"MIT"
] | null | null | null | SnakeBackend/main/utils/GameField.py | PatrickKoss/Snake | b15ba77d7b848db48206c14d8e16f10ebcc2e808 | [
"MIT"
] | 11 | 2021-03-19T12:27:29.000Z | 2022-02-27T11:10:27.000Z | SnakeBackend/main/utils/GameField.py | PatrickKoss/Snake | b15ba77d7b848db48206c14d8e16f10ebcc2e808 | [
"MIT"
] | null | null | null | from .Point import Point
| 42.203704 | 109 | 0.587977 | from .Point import Point
class GameField:
    """Game board holding the item, the snakes and the 40x40 grid."""

    def __init__(self, item, snakes):
        self.rows = 40
        self.columns = 40
        self.item = item
        self.snakes = snakes
        # build the grid row by row, column by column
        self.area = [Point(row, column)
                     for row in range(self.rows)
                     for column in range(self.columns)]

    def update_snakes_blocked_directions(self, snakes):
        """Recompute, for every snake, which head moves are blocked."""
        for snake in snakes:
            head = snake.blocks[0]
            # a direction is blocked when it would leave the field
            if head.x == 0:
                snake.current_direction_left_blocked = 1
            if head.x == self.rows - 1:
                snake.current_direction_right_blocked = 1
            if head.y == 0:
                snake.current_direction_top_blocked = 1
            if head.y == self.columns - 1:
                snake.current_direction_bottom_blocked = 1
            # collisions with the snake's own body (skip the head itself)
            for block in snake.blocks[1:]:
                self.update_snake_current_block(snake, block)
            # collisions with every other snake's body
            for other_snake in snakes:
                if snake == other_snake:
                    continue
                for block in other_snake.blocks:
                    self.update_snake_current_block(snake, block)

    def update_snake_current_block(self, snake, block):
        """Block any direction that would move the head onto `block`."""
        head = snake.blocks[0]
        if head.x == block.x + 1 and head.y == block.y:
            snake.current_direction_left_blocked = 1
        if head.x == block.x - 1 and head.y == block.y:
            snake.current_direction_right_blocked = 1
        if head.y == block.y - 1 and head.x == block.x:
            snake.current_direction_top_blocked = 1
        if head.y == block.y + 1 and head.x == block.x:
            snake.current_direction_bottom_blocked = 1

    def repr_json(self):
        """Dict representation suitable for JSON serialization."""
        return dict(rows=self.rows, columns=self.columns, item=self.item,
                    snakes=self.snakes, area=self.area)
| 2,128 | -5 | 130 |
67ff00c200f9767432af05a3c27135f192ddc0f7 | 156,558 | py | Python | bigmler/resources.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | [
"Apache-2.0"
] | null | null | null | bigmler/resources.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | [
"Apache-2.0"
] | null | null | null | bigmler/resources.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2012-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources management functions
"""
from __future__ import absolute_import
import sys
import time
try:
import simplejson as json
except ImportError:
import json
import bigml.api
from bigml.util import bigml_locale
from bigml.multivote import THRESHOLD_CODE
from bigmler.utils import (dated, get_url, log_message, plural, check_resource,
check_resource_error, log_created_resources,
decode2, transform_fields_keys,
is_shared, FILE_ENCODING, PYTHON3)
from bigmler.labels import label_model_args, get_all_labels
from bigmler.reports import report
# sample rate presumably used when splitting data for evaluations -- not
# referenced in this chunk; TODO confirm against the rest of the module
EVALUATE_SAMPLE_RATE = 0.8
# deterministic seed applied to dataset sampling (see set_basic_dataset_args)
SEED = "BigML, Machine Learning made easy"
LOCALE_DEFAULT = "en_US"
# query strings used when retrieving resources from the API
FIELDS_QS = 'only_model=true'
ALL_FIELDS_QS = "limit=-1"
# prefixes of the +/- relative input-field syntax (see configure_input_fields)
ADD_PREFIX = '+'
REMOVE_PREFIX = '-'
ADD_REMOVE_PREFIX = [ADD_PREFIX, REMOVE_PREFIX]
# output verbosity formats
BRIEF_FORMAT = 'brief'
NORMAL_FORMAT = 'normal'
FULL_FORMAT = 'full'
# per-resource-type field attributes accepted by the API
# (used by check_fields_struct to prune anything else)
VALID_FIELD_ATTRIBUTES = {
"source": ["name", "label", "description", "optype", "term_analysis"],
"dataset": ["name", "label", "description", "preferred", "term_analysis"]}
BOOSTING_OPTIONS = ["iterations", "early_holdout", "learning_rate", \
"early_out_of_bag", "step_out_of_bag"]
# NOTE(review): this alphabet is missing the letter 'W' -- confirm intended
DS_NAMES = "ABCDEFGHIJKLMNOPQRSTUVXYZ"
def get_basic_seed(order):
    """Build the standard seed string by appending the given order."""
    return "%s - %s" % (SEED, order)
def shared_changed(shared, resource):
    """Tell whether the user-requested shared flag differs from the
    resource's current shared status.
    """
    current_status = is_shared(resource)
    return current_status != shared
def configure_input_fields(fields, user_given_fields, by_name=False):
    """ Returns the input fields used in the new resource creation as given

        The user can choose to write all the fields that will be used in the
        new resource or modify the set of fields retrieved from the
        resource that will be used to create the new one.
    """
    def modify_input_fields(prefix, field, input_fields):
        """Adds or removes according to the prefix in the given field
           this field from the list of input fields.
        """
        if prefix == ADD_PREFIX:
            if not field in input_fields:
                input_fields.append(field)
        elif field in input_fields:
            input_fields.remove(field)

    # case of adding and removing fields to the dataset preferred field set
    if all([name[0] in ADD_REMOVE_PREFIX for name in user_given_fields]):
        preferred_fields = fields.preferred_fields()
        # list() so the result supports append/remove on Python 3 as well
        input_fields = list(preferred_fields.keys())
        if by_name:
            input_fields = [fields.field_name(field_id) for field_id in
                            input_fields]
        for name in user_given_fields:
            prefix = name[0]
            field_name = name[1:]
            if by_name:
                modify_input_fields(prefix, field_name, input_fields)
            else:
                try:
                    field_id = fields.field_id(field_name)
                # `except E as e` instead of the Python-2-only `except E, e`
                except ValueError as exc:
                    sys.exit(exc)
                modify_input_fields(prefix, field_id, input_fields)
    # case of user given entire list of fields
    else:
        if by_name:
            return user_given_fields
        input_fields = []
        for name in user_given_fields:
            try:
                input_fields.append(fields.field_id(name))
            except ValueError as exc:
                sys.exit(exc)
    return input_fields
def utf8(text):
    """Encode the text using the global FILE_ENCODING."""
    return text.encode(FILE_ENCODING)
def update_attributes(updatable_attributes, new_attributes, by_column=False,
                      fields=None):
    """Merge `new_attributes` into `updatable_attributes`, deep-merging
    the "fields" substructure field by field.

    updatable_attributes: previous attributes to be updated
    new_attributes: updates to be added
    by_column: set to True if keys are the column position of the field
    fields: Fields object info
    """
    if not new_attributes:
        return
    current_fields = updatable_attributes.get("fields", {})
    incoming_fields = new_attributes.get("fields", {})
    if incoming_fields and (not by_column or fields):
        for field_key, value in incoming_fields.items():
            # keys may be column positions; translate them to field ids
            field_id = fields.field_id(field_key) if by_column else field_key
            current_fields.setdefault(field_id, {}).update(value)
        updatable_attributes.update({"fields": current_fields})
    else:
        updatable_attributes.update(new_attributes)
def update_json_args(resource_attributes, json_attributes, fields=None):
    """Update the resource attributes with the contents of a JSON file."""
    if fields is not None:
        # translate column-keyed "fields" entries into field-id keys
        json_attributes = transform_fields_keys(json_attributes, fields)
    update_attributes(resource_attributes, json_attributes)
def relative_input_fields(fields, user_given_fields):
    """Returns the user given input fields using relative (+/-) syntax."""
    if all([(name[0] in ADD_REMOVE_PREFIX) for name in user_given_fields]):
        # already in relative syntax
        return user_given_fields
    input_fields = []
    # remove every preferred field that the user did not list
    preferred_fields = fields.preferred_fields()
    for field_id in preferred_fields.keys():
        name = fields.fields[field_id]['name']
        if not name in user_given_fields:
            input_fields.append("%s%s" % (REMOVE_PREFIX, name))
    # add every user-given field, validating that it exists
    for name in user_given_fields:
        try:
            fields.field_id(name)
        # `except E as e` instead of the Python-2-only `except E, e`
        except ValueError as exc:
            sys.exit(exc)
        input_fields.append("%s%s" % (ADD_PREFIX, name))
    return input_fields
def wait_for_available_tasks(inprogress, max_parallel, api,
                             resource_type, wait_step=2):
    """According to the max_parallel number of parallel resources to be
       created, when the number of in progress resources reaches the limit,
       it checks the ones in inprogress to see if there's a
       FINISHED or FAULTY resource. If found, it is removed from the
       inprogress list and returns to allow another one to be created.
    """
    check_kwargs = {"retries": 0, "query_string": "full=false", "api": api}
    while len(inprogress) == max_parallel:
        for j in range(len(inprogress)):
            try:
                ready = check_resource(inprogress[j], **check_kwargs)
                status = bigml.api.get_status(ready)
                if status['code'] == bigml.api.FINISHED:
                    # safe to delete while looping: we return immediately
                    del inprogress[j]
                    return
                elif status['code'] == bigml.api.FAULTY:
                    raise ValueError(status['message'])
            # `except E as e` instead of the Python-2-only `except E, e`
            except ValueError as exception:
                sys.exit("Failed to get a finished %s: %s" %
                         (resource_type, str(exception)))
        time.sleep(max_parallel * wait_step)
def check_fields_struct(update_args, resource_type):
    """In case the args to update have a `fields` attribute, it checks the
    structure in this attribute and removes the attributes for each field
    that will not be accepted by the API.
    """
    if "fields" in update_args:
        valid_attributes = VALID_FIELD_ATTRIBUTES.get(resource_type)
        fields_substr = update_args.get("fields")
        for _, field in fields_substr.items():
            # materialize the key list: deleting while iterating keys()
            # raises RuntimeError on Python 3
            for attribute in list(field.keys()):
                if not attribute in valid_attributes:
                    del field[attribute]
def set_basic_args(args, name):
    """Build the arguments dict shared by every resource type."""
    return {
        "name": name,
        "description": args.description_,
        "category": args.category,
        "tags": args.tag,
    }
def set_basic_model_args(args, name):
    """Build the additional args common to all models."""
    model_args = set_basic_args(args, name)
    if args.default_numeric_value is not None:
        model_args["default_numeric_value"] = args.default_numeric_value
    return model_args
def set_basic_batch_args(args, name):
    """Build the additional args common to all batch resources."""
    batch_args = set_basic_args(args, name)
    use_header = ((hasattr(args, "prediction_header") and
                   args.prediction_header) or
                  (hasattr(args, "projection_header") and
                   args.projection_header))
    batch_args.update(header=use_header, output_dataset=args.to_dataset)
    return batch_args
def set_source_args(args, name=None, multi_label_data=None,
                    data_set_header=None, fields=None):
    """Returns a source arguments dict
    """
    if name is None:
        name = args.name
    source_args = set_basic_args(args, name)
    if args.project_id is not None:
        source_args.update({"project": args.project_id})
    # if header is set, use it
    if data_set_header is not None:
        source_args.update({"source_parser": {"header": data_set_header}})
    # If user has given an OS locale, try to add the locale used in bigml.com
    if args.user_locale is not None:
        source_locale = bigml_locale(args.user_locale)
        if source_locale is None:
            log_message("WARNING: %s locale equivalence not found."
                        " Using %s instead.\n" % (args.user_locale,
                                                  LOCALE_DEFAULT),
                        log_file=None, console=True)
            source_locale = LOCALE_DEFAULT
        # setdefault: the original reset "source_parser" to {} here, which
        # silently dropped the header set above when both were given
        source_args.setdefault('source_parser', {})
        source_args["source_parser"].update({'locale': source_locale})
    # If user has set a training separator, use it.
    if args.training_separator is not None:
        training_separator = decode2(args.training_separator,
                                     encoding="string_escape")
        # setdefault: the original raised KeyError when neither the header
        # nor the locale branch had created "source_parser"
        source_args.setdefault('source_parser', {})
        source_args["source_parser"].update({'separator': training_separator})
    # If uploading a multi-label file, add the user_metadata info needed to
    # manage the multi-label fields
    if (hasattr(args, 'multi_label') and args.multi_label
            and multi_label_data is not None):
        source_args.update({
            "user_metadata": {
                "multi_label_data": multi_label_data}})
    # to update fields attributes or types you must have a previous fields
    # structure (at update time)
    if fields:
        if args.field_attributes_:
            update_attributes(source_args,
                              {"fields": args.field_attributes_},
                              by_column=True, fields=fields)
        if args.types_:
            update_attributes(source_args,
                              {"fields": args.types_},
                              by_column=True, fields=fields)
        if args.import_fields:
            fields_struct = fields.new_fields_structure(args.import_fields)
            check_fields_struct(fields_struct, "source")
            update_attributes(source_args, fields_struct)
    if 'source' in args.json_args:
        update_json_args(source_args, args.json_args.get('source'), fields)
    return source_args
def create_source(data_set, source_args, args, api=None, path=None,
                  session_file=None, log=None, source_type=None):
    """Creates remote source
    """
    if api is None:
        api = bigml.api.BigML()
    suffix = "" if source_type is None else "%s " % source_type
    message = dated("Creating %ssource.\n" % suffix)
    log_message(message, log_file=session_file, console=args.verbosity)
    check_fields_struct(source_args, "source")
    source = api.create_source(data_set, source_args,
                               progress_bar=args.progress_bar)
    if path is not None:
        suffix = "_" + source_type if source_type else ""
        log_created_resources(
            "source%s" % suffix, path,
            source['resource'], mode='a',
            comment=("%s\n" % source['object']['name']))
    source_id = check_resource_error(source, "Failed to create source: ")
    try:
        source = check_resource(source, api.get_source,
                                query_string=ALL_FIELDS_QS)
    # `except E as e` instead of the Python-2-only `except E, e`
    except ValueError as exception:
        sys.exit("Failed to get a finished source: %s" % str(exception))
    message = dated("Source created: %s\n" % get_url(source))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % source_id, log_file=log)
    if args.reports:
        report(args.reports, path, source)
    return source
def data_to_source(args):
    """Extract, from the given flags, the file and header used to build
    a source object.
    """
    use_training = (args.training_set and not args.source and
                    not args.dataset and not args.has_models_)
    if use_training:
        return args.training_set, args.train_header
    use_test = (hasattr(args, 'evaluate') and args.evaluate and
                args.test_set and not args.source)
    if use_test:
        return args.test_set, args.test_header
    return None, None
def get_source(source, api=None, verbosity=True,
               session_file=None):
    """Retrieves the source in its actual state and its field info
    """
    if api is None:
        api = bigml.api.BigML()
    # refresh only when given an id or an unfinished resource
    if (isinstance(source, basestring) or
            bigml.api.get_status(source)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving source. %s\n" %
                        get_url(source))
        log_message(message, log_file=session_file,
                    console=verbosity)
        try:
            source = check_resource(source, api.get_source,
                                    query_string=ALL_FIELDS_QS)
        # `except E as e` instead of the Python-2-only `except E, e`
        except ValueError as exception:
            sys.exit("Failed to get a finished source: %s" % str(exception))
    return source
def update_source(source, source_args, args,
                  api=None, session_file=None):
    """Updates source properties

       Applies ``source_args`` to the remote source and returns the
       refreshed resource.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating source. %s\n" % get_url(source)),
                log_file=session_file, console=args.verbosity)
    updated_source = api.update_source(source, source_args)
    check_resource_error(updated_source, "Failed to update source: ")
    return check_resource(updated_source, api.get_source)
def set_basic_dataset_args(args, name=None):
    """Return dataset basic arguments dict

       Builds the common name/sampling/range arguments used when
       creating a dataset from the processed command-line ``args``.
    """
    if name is None:
        name = args.name
    dataset_args = set_basic_args(args, name)
    if args.sample_rate != 1 and args.no_model:
        dataset_args.update({
            "seed": SEED if args.seed is None else args.seed,
            "sample_rate": args.sample_rate
        })
    if hasattr(args, "range") and args.range_:
        dataset_args.update({
            # bug fix: the original referenced the undefined name
            # ``args_range`` (NameError at runtime); the processed
            # flag value lives in ``args.range_``
            "range": args.range_
        })
    return dataset_args
def set_dataset_args(args, fields, multi_label_data=None):
    """Return dataset arguments dict

       Extends the basic dataset args with objective field, filters,
       input fields, SQL/JSON transformations and multi-label metadata.
       Exits the program when the objective field cannot be resolved.
    """
    dataset_args = set_basic_dataset_args(args)
    objective_field = (None if not hasattr(args, 'objective_field')
                       else args.objective_field)
    # multi-label runs store the objective name in the label info
    if multi_label_data is not None and objective_field is None:
        objective_field = multi_label_data['objective_name']
    if objective_field is not None and fields is not None:
        try:
            objective_id = fields.field_id(objective_field)
        # "as" form replaces the deprecated "except E, e" syntax
        except ValueError as exc:
            sys.exit(exc)
        dataset_args.update(objective_field={'id': objective_id})
    if hasattr(args, 'juxtapose') and args.juxtapose:
        dataset_args.update({"juxtapose": args.juxtapose})
    if hasattr(args, 'sql_query') and args.sql_query:
        dataset_args.update({"sql_query": args.sql_query})
    if hasattr(args, 'sql_output_fields_') and args.sql_output_fields_:
        dataset_args.update({"sql_output_fields": args.sql_output_fields_})
    if hasattr(args, 'json_query_') and args.json_query_:
        dataset_args.update({"json_query": args.json_query_})
    # json_filter takes precedence over lisp_filter
    if args.json_filter:
        dataset_args.update(json_filter=args.json_filter)
    elif args.lisp_filter:
        dataset_args.update(lisp_filter=args.lisp_filter)
    if args.dataset_fields_ and fields is not None:
        input_fields = configure_input_fields(fields, args.dataset_fields_)
        dataset_args.update(input_fields=input_fields)
    if (hasattr(args, 'multi_label') and args.multi_label
            and multi_label_data is not None):
        dataset_args.update(
            user_metadata={'multi_label_data': multi_label_data})
    if fields and args.import_fields:
        fields_struct = fields.new_fields_structure(args.import_fields)
        check_fields_struct(fields_struct, "dataset")
        update_attributes(dataset_args, fields_struct)
    # user-provided JSON args override everything set above
    if 'dataset' in args.json_args:
        update_json_args(dataset_args, args.json_args.get('dataset'), fields)
    return dataset_args
def set_dataset_split_args(name, description, args, sample_rate=1,
                           out_of_bag=False, multi_label_data=None):
    """Return dataset arguments dict to split a dataset

       ``sample_rate`` and ``out_of_bag`` select the train or test
       part of a deterministic (seeded) split.
    """
    dataset_args = {
        "name": name,
        "description": description,
        "category": args.category,
        "tags": args.tag,
        "seed": SEED if args.seed is None else args.seed,
        "sample_rate": sample_rate,
        "out_of_bag": out_of_bag
    }
    if hasattr(args, "range") and args.range_:
        dataset_args.update({
            # bug fix: the original referenced the undefined name
            # ``args_range`` (NameError at runtime); the processed
            # flag value lives in ``args.range_``
            "range": args.range_
        })
    if (hasattr(args, "multi_label") and
            args.multi_label and multi_label_data is not None):
        dataset_args.update(
            user_metadata={'multi_label_data': multi_label_data})
    return dataset_args
def create_dataset(origin_resource, dataset_args, args, api=None,
                   path=None, session_file=None, log=None, dataset_type=None):
    """Creates remote dataset from source, dataset, cluster or datasets list

       Logs the created dataset id, waits for it to finish and returns
       the finished resource. Exits the program on failure.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating dataset.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    check_fields_struct(dataset_args, "dataset")
    # if --json-query or --sql-query are used and no names are set for
    # the datasets, we create default naming to A, B, C, etc. for the datasets
    # to be used as origin
    # bug fix: the second clause tested ``args.sql_query`` again instead
    # of ``args.json_query``, so --json-query alone never triggered the
    # default naming
    if ((hasattr(args, 'sql_query') and args.sql_query) or \
            (hasattr(args, 'json_query') and args.json_query)) and \
            isinstance(origin_resource, list) and \
            ((not isinstance(origin_resource[0], dict)) or \
            origin_resource[0].get("name") is None):
        for index, element in enumerate(origin_resource):
            if index < len(DS_NAMES):
                if isinstance(element, dict):
                    if element.get("resource") is not None:
                        element = {"id": element["resource"]}
                    element.update({"name": DS_NAMES[index]})
                    origin_resource[index] = element
                elif isinstance(element, basestring):
                    origin_resource[index] = {"id": element,
                                              "name": DS_NAMES[index]}
    dataset = api.create_dataset(origin_resource, dataset_args, retries=None)
    suffix = "_" + dataset_type if dataset_type else ""
    log_created_resources("dataset%s" % suffix, path,
                          bigml.api.get_dataset_id(dataset), mode='a')
    dataset_id = check_resource_error(dataset, "Failed to create dataset: ")
    try:
        dataset = check_resource(dataset, api.get_dataset,
                                 query_string=ALL_FIELDS_QS)
    # "as" form replaces the deprecated "except E, e" syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished dataset: %s" % str(exception))
    message = dated("Dataset created: %s\n" % get_url(dataset))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % dataset_id, log_file=log)
    if args.reports:
        report(args.reports, path, dataset)
    return dataset
def get_dataset(dataset, api=None, verbosity=True, session_file=None):
    """Retrieves the dataset in its actual state

       Polls the dataset until it is FINISHED when given an id or an
       unfinished resource; otherwise returns it as is.
    """
    if api is None:
        api = bigml.api.BigML()
    already_finished = (not isinstance(dataset, basestring) and
                        bigml.api.get_status(dataset)['code'] ==
                        bigml.api.FINISHED)
    if not already_finished:
        log_message(dated("Retrieving dataset. %s\n" % get_url(dataset)),
                    log_file=session_file, console=verbosity)
        dataset = check_resource(dataset, api.get_dataset,
                                 query_string=ALL_FIELDS_QS)
        check_resource_error(dataset, "Failed to get dataset: ")
    return dataset
def publish_dataset(dataset, args, api=None, session_file=None):
    """Publishes dataset and sets its price (if any)
    """
    if api is None:
        api = bigml.api.BigML()
    # making the dataset public, optionally with a price tag
    public_args = {"private": False}
    if args.dataset_price:
        public_args["price"] = args.dataset_price
    dataset = update_dataset(dataset, public_args, args, api=api,
                             session_file=session_file)
    check_resource_error(dataset, "Failed to update dataset: ")
    return check_resource(dataset, api.get_dataset,
                          query_string=ALL_FIELDS_QS)
def update_dataset(dataset, dataset_args, args,
                   api=None, path=None, session_file=None):
    """Updates dataset properties

       Applies ``dataset_args``, logs a share link when the dataset is
       shared, and returns the refreshed resource.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating dataset. %s\n" % get_url(dataset)),
                log_file=session_file, console=args.verbosity)
    dataset = api.update_dataset(dataset, dataset_args)
    if is_shared(dataset):
        shared_message = dated("Shared dataset link. %s\n" %
                               get_url(dataset, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, dataset)
    check_resource_error(dataset, "Failed to update dataset: ")
    return check_resource(dataset, api.get_dataset,
                          query_string=ALL_FIELDS_QS)
def set_model_args(args, name=None, objective_id=None, fields=None,
                   model_fields=None, other_label=None):
    """Return model arguments dict

       Builds the create-model payload from the processed command-line
       ``args``, optionally overriding name, objective, fields and the
       label used for the --max-categories "other" group. Exits the
       program when the weight field cannot be resolved.
    """
    if name is None:
        name = args.name
    if objective_id is None and args.max_categories is None:
        objective_id = args.objective_id_
    # with --max-categories the objective is addressed by name
    if args.max_categories > 0:
        objective_id = args.objective_field
    if model_fields is None:
        model_fields = args.model_fields_
    model_args = set_basic_model_args(args, name)
    model_args.update({"missing_splits": args.missing_splits})
    if objective_id is not None and fields is not None:
        model_args.update({"objective_field": objective_id})
    # If evaluate flag is on and no test_split flag is provided,
    # we choose a deterministic sampling with
    # args.sample_rate (80% by default) of the data to create the model
    # If cross_validation_rate = n/100, then we choose to run 2 * n evaluations
    # by holding out a n% of randomly sampled data.
    if ((args.evaluate and args.test_split == 0 and args.test_datasets is None)
            or args.cross_validation_rate > 0):
        model_args.update(seed=SEED)
        if args.cross_validation_rate > 0:
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
              and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE
    if model_fields and fields is not None:
        input_fields = configure_input_fields(
            fields, model_fields, by_name=(args.max_categories > 0))
        model_args.update(input_fields=input_fields)
    if args.pruning and args.pruning != 'smart':
        model_args.update(stat_pruning=(args.pruning == 'statistical'))
    if args.node_threshold > 0:
        model_args.update(node_threshold=args.node_threshold)
    if args.balance:
        model_args.update(balance_objective=True)
    if args.split_field:
        model_args.update(split_field=args.split_field)
    if args.focus_field:
        model_args.update(focus_field=args.focus_field)
    if args.weight_field:
        try:
            weight_field = fields.field_id(args.weight_field)
        # "as" form replaces the deprecated "except E, e" syntax
        except ValueError as exc:
            sys.exit(exc)
        model_args.update(weight_field=weight_field)
    if args.objective_weights:
        model_args.update(objective_weights=args.objective_weights_json)
    if args.max_categories > 0:
        model_args.update(
            user_metadata={'other_label': other_label,
                           'max_categories': args.max_categories})
    model_args = update_sample_parameters_args(model_args, args)
    # user-provided JSON args override everything set above
    if 'model' in args.json_args:
        update_json_args(model_args, args.json_args.get('model'), fields)
    return model_args
def set_label_model_args(args, fields, labels, multi_label_data):
    """Set of args needed to build a model per label

       Returns one model-args dict per label (in reverse label order)
       for multi-label modelling. Exits the program when the objective
       field cannot be resolved.
    """
    objective_field = args.objective_field
    if not args.model_fields_:
        model_fields = []
    else:
        model_fields = relative_input_fields(fields, args.model_fields_)
    if objective_field is None:
        objective_field = fields.objective_field
    try:
        objective_id = fields.field_id(objective_field)
        objective_field = fields.field_name(objective_id)
    # "as" form replaces the deprecated "except E, e" syntax
    except ValueError as exc:
        sys.exit(exc)
    all_labels = get_all_labels(multi_label_data)
    model_args_list = []
    # reverse order so resuming picks up the remaining labels correctly
    for index in range(args.number_of_models - 1, -1, -1):
        label = labels[index]
        (new_name, label_field, single_label_fields) = label_model_args(
            args.name, label, all_labels, model_fields,
            objective_field)
        model_args = set_model_args(args, name=new_name,
                                    objective_id=label_field, fields=fields,
                                    model_fields=single_label_fields)
        if multi_label_data is not None:
            model_args.update(
                user_metadata={'multi_label_data': multi_label_data})
        model_args_list.append(model_args)
    return model_args_list
def create_models(datasets, model_ids, model_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Create remote models

       ``model_ids`` holds ids of models created in a previous (resumed)
       run; only the remaining models are created. ``model_args`` may be
       a single dict or a list with one dict per model. Returns the
       tuple (models, model_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if args.dataset_off and args.evaluate:
        # keep the full dataset list to build leave-one-out multidatasets
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_models:]
    # if resuming and all models were created, there will be no datasets left
    if datasets:
        dataset = datasets[0]
        if isinstance(model_args, list):
            model_args_list = model_args
        if args.number_of_models > 0:
            message = dated("Creating %s.\n" %
                            plural("model", args.number_of_models))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            single_model = args.number_of_models == 1 and existing_models == 0
            # if there's more than one model the first one must contain
            # the entire field structure to be used as reference.
            query_string = (FIELDS_QS if single_model and (args.test_header \
                and not args.export_fields) else ALL_FIELDS_QS)
            inprogress = []
            for i in range(0, args.number_of_models):
                # throttle creation to the allowed number of parallel tasks
                wait_for_available_tasks(inprogress, args.max_parallel_models,
                                         api, "model")
                if model_args_list:
                    model_args = model_args_list[i]
                if args.cross_validation_rate > 0:
                    # each cross-validation model uses a different seed
                    new_seed = get_basic_seed(i + existing_models)
                    model_args.update(seed=new_seed)
                # one model per dataset (--max-categories or single model)
                if (args.max_categories > 0 or
                        (args.test_datasets and args.evaluate)):
                    dataset = datasets[i]
                    model = api.create_model(dataset, model_args, retries=None)
                elif args.dataset_off and args.evaluate:
                    # leave-one-out: train on all datasets but the i-th
                    multi_dataset = args.test_dataset_ids[:]
                    del multi_dataset[i + existing_models]
                    model = api.create_model(multi_dataset, model_args,
                                             retries=None)
                else:
                    model = api.create_model(datasets, model_args,
                                             retries=None)
                model_id = check_resource_error(model,
                                                "Failed to create model: ")
                log_message("%s\n" % model_id, log_file=log)
                model_ids.append(model_id)
                inprogress.append(model_id)
                models.append(model)
                log_created_resources("models", path, model_id, mode='a')
        # NOTE(review): relies on ``model``/``query_string`` bound in the
        # loop above; with number_of_models == 0 this would raise NameError
        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s\n" %
                            get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, model)
    return models, model_ids
def create_model(cluster, model_args, args, api=None,
                 path=None, session_file=None, log=None, model_type=None):
    """Creates remote model from cluster and centroid

       Waits for the model to finish and returns the finished resource.
       Exits the program on failure.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating model.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    model = api.create_model(cluster, model_args, retries=None)
    suffix = "" if model_type is None else "_%s" % model_type
    log_created_resources("models%s" % suffix, path,
                          bigml.api.get_model_id(model), mode='a')
    model_id = check_resource_error(model, "Failed to create model: ")
    try:
        model = check_resource(model, api.get_model,
                               query_string=ALL_FIELDS_QS)
    # "as" form replaces the deprecated "except E, e" syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished model: %s" % str(exception))
    message = dated("Model created: %s\n" % get_url(model))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % model_id, log_file=log)
    if args.reports:
        report(args.reports, path, model)
    return model
def update_model(model, model_args, args,
                 api=None, path=None, session_file=None):
    """Updates model properties

       Applies ``model_args``, logs a share link when the model is
       shared, and returns the refreshed resource.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating model. %s\n" % get_url(model)),
                log_file=session_file, console=args.verbosity)
    model = api.update_model(model, model_args)
    check_resource_error(model, "Failed to update model: %s"
                         % model['resource'])
    model = check_resource(model, api.get_model, query_string=ALL_FIELDS_QS)
    if is_shared(model):
        shared_message = dated("Shared model link. %s\n" %
                               get_url(model, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, model)
    return model
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in its actual status

       When the number of models is below ``args.max_batch_models`` all
       of them are downloaded; otherwise only the first one is polled.
       Returns the tuple (models, model_ids). Exits on failure.
    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    single_model = len(model_ids) == 1
    if single_model:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                # if there's more than one model the first one must contain
                # the entire field structure to be used as reference.
                query_string = (
                    ALL_FIELDS_QS if (
                        (not single_model and (
                            len(models) == 0 or args.multi_label)) or
                        not args.test_header)
                    else FIELDS_QS)
                model = check_resource(model, api.get_model,
                                       query_string=query_string)
            # "as" form replaces the deprecated "except E, e" syntax
            except ValueError as exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
    else:
        try:
            query_string = (ALL_FIELDS_QS if not single_model or
                            not args.test_header
                            else FIELDS_QS)
            model = check_resource(model_ids[0], api.get_model,
                                   query_string=query_string)
        except ValueError as exception:
            sys.exit("Failed to get a finished model: %s" % str(exception))
        models[0] = model
    return models, model_ids
def set_label_ensemble_args(args, labels, multi_label_data,
                            number_of_ensembles, fields):
    """Set of args needed to build an ensemble per label

       Returns one ensemble-args dict per label (in reverse label
       order) for multi-label modelling. Exits the program when the
       objective field cannot be resolved.
    """
    # NOTE(review): this converts model_fields_ only when it is empty,
    # the opposite of set_label_model_args — confirm intended
    if not args.model_fields_:
        args.model_fields_ = relative_input_fields(fields, args.model_fields_)
    if args.objective_field is None:
        args.objective_field = fields.objective_field
    try:
        objective_id = fields.field_id(args.objective_field)
    # "as" form replaces the deprecated "except E, e" syntax
    except ValueError as exc:
        sys.exit(exc)
    objective_field = fields.fields[objective_id]['name']
    ensemble_args_list = []
    # reverse order so resuming picks up the remaining labels correctly
    for index in range(number_of_ensembles - 1, -1, -1):
        label = labels[index]
        all_labels = get_all_labels(multi_label_data)
        (new_name, label_field, single_label_fields) = label_model_args(
            args.name, label, all_labels, args.model_fields_,
            objective_field)
        ensemble_args = set_ensemble_args(args, name=new_name,
                                          objective_id=label_field,
                                          model_fields=single_label_fields,
                                          fields=fields)
        if multi_label_data is not None:
            ensemble_args.update(
                user_metadata={'multi_label_data': multi_label_data})
        ensemble_args_list.append(ensemble_args)
    return ensemble_args_list
def set_ensemble_args(args, name=None,
                      objective_id=None, model_fields=None, fields=None):
    """Return ensemble arguments dict

       Builds the create-ensemble payload from the processed
       command-line ``args``. Exits the program when the weight field
       cannot be resolved.
    """
    if name is None:
        name = args.name
    if objective_id is None:
        objective_id = args.objective_id_
    if model_fields is None:
        model_fields = args.model_fields_
    ensemble_args = set_basic_model_args(args, name)
    ensemble_args.update({
        "missing_splits": args.missing_splits,
        "ensemble_sample": {"seed": SEED if args.ensemble_sample_seed is None \
            else args.ensemble_sample_seed},
        "seed": SEED if args.seed is None else args.seed
    })
    if objective_id is not None and fields is not None:
        ensemble_args.update({"objective_field": objective_id})
    if args.boosting:
        # boosted ensembles take boosting options instead of a fixed
        # number of models
        boosting_args = {}
        for option in BOOSTING_OPTIONS:
            if hasattr(args, option) and getattr(args, option) is not None:
                boosting_args.update({option: getattr(args, option)})
        ensemble_args.update({"boosting": boosting_args})
    else:
        ensemble_args.update({"number_of_models": args.number_of_models})
    # If evaluate flag is on and no test_split flag is provided,
    # we choose a deterministic sampling with
    # args.sample_rate (80% by default) of the data to create the model
    if (args.evaluate and args.test_split == 0 and
            args.test_datasets is None and not args.dataset_off):
        ensemble_args.update({"seed": SEED})
        if args.sample_rate == 1:
            args.sample_rate = EVALUATE_SAMPLE_RATE
    if model_fields and fields is not None:
        input_fields = configure_input_fields(fields, model_fields)
        ensemble_args.update(input_fields=input_fields)
    if args.pruning and args.pruning != 'smart':
        ensemble_args.update(stat_pruning=(args.pruning == 'statistical'))
    if args.node_threshold > 0:
        ensemble_args.update(node_threshold=args.node_threshold)
    if args.balance:
        ensemble_args.update(balance_objective=True)
    if args.weight_field:
        try:
            weight_field = fields.field_id(args.weight_field)
        # "as" form replaces the deprecated "except E, e" syntax
        except ValueError as exc:
            sys.exit(exc)
        ensemble_args.update(weight_field=weight_field)
    if args.objective_weights:
        ensemble_args.update(objective_weights=args.objective_weights_json)
    if args.random_candidates:
        ensemble_args.update(random_candidates=args.random_candidates)
    update_attributes(ensemble_args, args.json_args.get('model'))
    ensemble_args = update_sample_parameters_args(ensemble_args, args)
    ensemble_args["ensemble_sample"].update( \
        {"rate": args.ensemble_sample_rate,
         "replacement": args.ensemble_sample_replacement})
    # user-provided JSON args override everything set above
    if 'ensemble' in args.json_args:
        update_json_args(ensemble_args, args.json_args.get('ensemble'), fields)
    return ensemble_args
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1,
                     api=None, path=None, session_file=None, log=None):
    """Create ensembles from input data

       ``ensemble_ids`` holds ids created in a previous (resumed) run;
       only the remaining ensembles are created. Returns the tuple
       (ensembles, ensemble_ids, models, model_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    existing_ensembles = len(ensembles)
    # bug fix: initialize ``models`` so the final return does not raise
    # NameError when number_of_ensembles <= 0
    models = []
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if args.dataset_off and args.evaluate:
        # keep the full dataset list to build leave-one-out multidatasets
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_ensembles:]
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_ensembles):
            # throttle creation to the allowed number of parallel tasks
            wait_for_available_tasks(inprogress, args.max_parallel_ensembles,
                                     api, "ensemble",
                                     wait_step=args.number_of_models)
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            if args.dataset_off and args.evaluate:
                # leave-one-out: train on all datasets but the i-th
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_ensembles]
                ensemble = api.create_ensemble(multi_dataset,
                                               ensemble_args,
                                               retries=None)
            else:
                ensemble = api.create_ensemble(datasets, ensemble_args,
                                               retries=None)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s\n" %
                            get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)
    return ensembles, ensemble_ids, models, model_ids
def retrieve_ensembles_models(ensembles, api, path=None):
    """Retrieves the models associated to a list of ensembles

       Finishes any pending ensemble, collects all their model ids,
       logs them to ``path`` and downloads only the first model (used
       as field-structure reference). Exits the program on failure.
    """
    models = []
    model_ids = []
    for index in range(0, len(ensembles)):
        ensemble = ensembles[index]
        if (isinstance(ensemble, basestring) or
                bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED):
            try:
                ensemble = check_resource(ensemble, api.get_ensemble)
                ensembles[index] = ensemble
            # "as" form replaces the deprecated "except E, e" syntax
            except ValueError as exception:
                sys.exit("Failed to get a finished ensemble: %s" %
                         str(exception))
        model_ids.extend(ensemble['object']['models'])
    if path is not None:
        for model_id in model_ids:
            log_created_resources("models", path, model_id, mode='a')
    models = model_ids[:]
    # only the first model is fully retrieved; the rest stay as ids
    models[0] = check_resource(models[0], api.get_model,
                               query_string=ALL_FIELDS_QS)
    return models, model_ids
def get_ensemble(ensemble, api=None, verbosity=True, session_file=None):
    """Retrieves remote ensemble in its actual status

       Polls the ensemble until it is FINISHED when given an id or an
       unfinished resource; otherwise returns it as is.
    """
    if api is None:
        api = bigml.api.BigML()
    already_finished = (not isinstance(ensemble, basestring) and
                        bigml.api.get_status(ensemble)['code'] ==
                        bigml.api.FINISHED)
    if not already_finished:
        log_message(dated("Retrieving ensemble. %s\n" % get_url(ensemble)),
                    log_file=session_file, console=verbosity)
        ensemble = check_resource(ensemble, api.get_ensemble)
        check_resource_error(ensemble, "Failed to get ensemble: ")
    return ensemble
def set_publish_model_args(args):
    """Set args to publish model

       Builds the payload to make a model public (black box or white
       box), optionally attaching a price and credits per prediction.
    """
    # white box takes precedence when both flags are set
    if args.white_box:
        public_model = {"private": False, "white_box": True}
    elif args.black_box:
        public_model = {"private": False}
    else:
        public_model = {}
    if args.model_price:
        public_model["price"] = args.model_price
    if args.cpp:
        public_model["credits_per_prediction"] = args.cpp
    return public_model
def map_fields(fields_map, model_fields, dataset_fields):
    """Build a dict to map model to dataset fields

       ``fields_map`` maps model column names/ids to dataset column
       names/ids; the result maps model field ids to dataset field ids.
       Exits the program when a column cannot be resolved.
    """
    update_map = {}
    # .items() works in both Python 2 and 3 (iteritems is Python-2-only)
    for (model_column, dataset_column) in fields_map.items():
        try:
            update_map.update({
                model_fields.field_id(model_column):
                dataset_fields.field_id(dataset_column)})
        # "as" form replaces the deprecated "except E, e" syntax
        except ValueError as exc:
            sys.exit(exc)
    return update_map
def set_evaluation_args(args, fields=None,
                        dataset_fields=None, name=None):
    """Return evaluation args dict

       Builds the create-evaluation payload. Depending on how the data
       was provided (existing dataset/test set, --test-split, dataset
       lists, or plain training data) it may also add out_of_bag
       sampling so the held-out part of the training data is evaluated.
    """
    if name is None:
        name = args.name
    evaluation_args = set_basic_args(args, name)
    # combiner for multi-model / ensemble evaluations
    if hasattr(args, 'method') and (args.number_of_models > 1
                                    or args.ensemble):
        evaluation_args.update(combiner=args.method)
    if hasattr(args, 'method') and args.method:
        evaluation_args.update({"combiner": args.method})
        if args.method == THRESHOLD_CODE:
            # threshold combiner needs k and the positive class
            threshold = {}
            if hasattr(args, 'threshold') and args.threshold is not None:
                threshold.update(k=args.threshold)
            if hasattr(args, 'threshold_class') \
                    and args.threshold_class is not None:
                threshold.update({"class": args.threshold_class})
            evaluation_args.update(threshold=threshold)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        evaluation_args.update({"fields_map": map_fields(args.fields_map_,
                                                         fields,
                                                         dataset_fields)})
    if hasattr(args, 'missing_strategy') and args.missing_strategy:
        evaluation_args.update(missing_strategy=args.missing_strategy)
    # user-provided JSON args override everything set above
    if 'evaluation' in args.json_args:
        update_json_args(
            evaluation_args, args.json_args.get('evaluation'), fields)
    # if evaluating time series we need to use ranges
    if args.subcommand == "time-series" and args.test_split == 0 and \
            not args.has_test_datasets_:
        args.range_ = [int(args.max_rows * EVALUATE_SAMPLE_RATE) + 1,
                       args.max_rows]
        evaluation_args.update({"range": args.range_})
        return evaluation_args
    # Two cases to use out_of_bag and sample_rate: standard evaluations where
    # only the training set is provided, and cross_validation
    # [--dataset|--test] [--model|--models|--model-tag|--ensemble] --evaluate
    if (((hasattr(args, "dataset") and args.dataset) or args.test_set)
            and args.has_supervised_):
        return evaluation_args
    # [--train|--dataset] --test-split --evaluate
    if args.test_split > 0 and (args.training_set or args.dataset):
        return evaluation_args
    # --datasets --test-datasets or equivalents
    #if args.datasets and (args.test_datasets or args.dataset_off):
    if args.has_datasets_ and (args.has_test_datasets_ or args.dataset_off):
        return evaluation_args
    # fallback: evaluate on the out-of-bag part of the training sample
    if args.sample_rate == 1:
        args.sample_rate = EVALUATE_SAMPLE_RATE
    evaluation_args.update(out_of_bag=True, seed=SEED,
                           sample_rate=args.sample_rate)
    return evaluation_args
def set_label_evaluation_args(args, labels, all_labels,
                              number_of_evaluations, fields, dataset_fields,
                              objective_field):
    """Set of args needed to build an evaluation per label

       Returns one evaluation-args dict per label (in reverse label
       order) for multi-label evaluations. Exits the program when the
       objective field cannot be resolved.
    """
    if objective_field is None:
        try:
            objective_id = fields.field_id(fields.objective_field)
        # "as" form replaces the deprecated "except E, e" syntax
        except ValueError as exc:
            sys.exit(exc)
        objective_field = fields.fields[objective_id]['name']
    evaluation_args_list = []
    # reverse order so resuming picks up the remaining labels correctly
    for index in range(number_of_evaluations - 1, -1, -1):
        label = labels[index]
        new_name = label_model_args(
            args.name, label, all_labels, [], objective_field)[0]
        evaluation_args = set_evaluation_args(args,
                                              fields=fields,
                                              dataset_fields=dataset_fields,
                                              name=new_name)
        evaluation_args_list.append(evaluation_args)
    return evaluation_args_list
def create_evaluations(model_or_ensemble_ids, datasets, evaluation_args,
                       args, api=None,
                       path=None, session_file=None, log=None,
                       existing_evaluations=0):
    """Create evaluations for a list of models

       ``model_or_ensemble_ids``: list of model or ensemble ids to create
                                  an evaluation of
       ``datasets``: dataset objects or ids to evaluate with
       ``evaluation_args``: arguments for the ``create_evaluation`` call;
                            a dict, or a list with one dict per evaluation
       ``args``: input values for bigmler flags
       ``api``: api to remote objects in BigML
       ``path``: directory to store the BigMLer generated files in
       ``session_file``: file to store the messages of that session
       ``log``: user provided log file
       ``existing_evaluations``: evaluations found when attempting resume
    """
    evaluations = []
    dataset = datasets[0]
    evaluation_args_list = []
    if isinstance(evaluation_args, list):
        evaluation_args_list = evaluation_args
    if api is None:
        api = bigml.api.BigML()
    # skip the ids already evaluated in a previous (resumed) run
    remaining_ids = model_or_ensemble_ids[existing_evaluations:]
    if args.test_dataset_ids or args.dataset_off:
        # one dataset per model when evaluating against dataset lists
        remaining_datasets = datasets[existing_evaluations:]
    number_of_evaluations = len(remaining_ids)
    message = dated("Creating evaluations.\n")
    log_message(message, log_file=session_file,
                console=args.verbosity)
    inprogress = []
    for i in range(0, number_of_evaluations):
        model = remaining_ids[i]
        if args.test_dataset_ids or args.dataset_off:
            dataset = remaining_datasets[i]
        # throttle creation to the allowed number of parallel tasks
        wait_for_available_tasks(inprogress, args.max_parallel_evaluations,
                                 api, "evaluation")
        if evaluation_args_list != []:
            evaluation_args = evaluation_args_list[i]
        if args.cross_validation_rate > 0:
            # each cross-validation evaluation uses a different seed
            new_seed = get_basic_seed(i + existing_evaluations)
            evaluation_args.update(seed=new_seed)
        evaluation = api.create_evaluation(model, dataset, evaluation_args,
                                           retries=None)
        evaluation_id = check_resource_error(evaluation,
                                             "Failed to create evaluation: ")
        inprogress.append(evaluation_id)
        log_created_resources("evaluations", path, evaluation_id,
                              mode='a')
        evaluations.append(evaluation)
        log_message("%s\n" % evaluation['resource'], log_file=log)
    # for a single evaluation, wait for it to finish and report the URL
    if (args.number_of_evaluations < 2 and len(evaluations) == 1
            and args.verbosity):
        evaluation = evaluations[0]
        if bigml.api.get_status(evaluation)['code'] != bigml.api.FINISHED:
            try:
                evaluation = check_resource(evaluation, api.get_evaluation)
            except ValueError, exception:
                sys.exit("Failed to get a finished evaluation: %s" %
                         str(exception))
            evaluations[0] = evaluation
        message = dated("Evaluation created: %s\n" %
                        get_url(evaluation))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, evaluation)
    return evaluations
def get_evaluation(evaluation, api=None, verbosity=True, session_file=None):
    """Retrieves evaluation in its actual state

       Polls until the evaluation is finished and returns it. Exits the
       program on failure.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Retrieving evaluation. %s\n" %
                    get_url(evaluation))
    log_message(message, log_file=session_file, console=verbosity)
    try:
        evaluation = check_resource(evaluation, api.get_evaluation)
    # "as" form replaces the deprecated "except E, e" syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished evaluation: %s" % str(exception))
    return evaluation
def update_evaluation(evaluation, evaluation_args, args,
                      api=None, path=None, session_file=None):
    """Updates evaluation properties

       Applies ``evaluation_args``, logs a share link when the
       evaluation is shared, and returns the refreshed resource.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating evaluation. %s\n" % get_url(evaluation)),
                log_file=session_file, console=args.verbosity)
    evaluation = api.update_evaluation(evaluation, evaluation_args)
    check_resource_error(evaluation, "Failed to update evaluation: %s"
                         % evaluation['resource'])
    evaluation = check_resource(evaluation, api.get_evaluation)
    if is_shared(evaluation):
        shared_message = dated("Shared evaluation link. %s\n" %
                               get_url(evaluation, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, evaluation)
    return evaluation
def save_evaluation(evaluation, output, api=None):
    """Creates the evaluation .txt and .json files

       Unwraps the evaluation result from the resource structure and
       dumps it next to ``output``.
    """
    if api is None:
        api = bigml.api.BigML()
    result = evaluation.get('object', evaluation).get('result', evaluation)
    save_txt_and_json(result, output, api=api)
def save_txt_and_json(object_dict, output, api=None):
    """Saves in txt and JSON format the contents of a dict object

       Writes ``output``.json with the raw JSON dump and ``output``.txt
       with the api's pretty-printed rendering.
    """
    # text mode on Python 3, bytes (utf8-encoded) on Python 2
    open_mode = 'wt' if PYTHON3 else 'wb'
    serialized = json.dumps(object_dict)
    if not PYTHON3:
        serialized = utf8(serialized)
    with open(output + '.json', open_mode) as dict_json:
        dict_json.write(serialized)
    with open(output + '.txt', open_mode) as dict_txt:
        api.pprint(object_dict, dict_txt)
def set_batch_prediction_args(args, fields=None,
                              dataset_fields=None):
    """Return batch prediction args dict

       Builds the create-batch-prediction payload from the processed
       command-line ``args``. Exits the program when an output field
       cannot be resolved.
    """
    batch_prediction_args = set_basic_batch_args(args, args.name)
    if hasattr(args, 'method') and args.method:
        batch_prediction_args.update({"combiner": args.method})
        if args.method == THRESHOLD_CODE:
            # threshold combiner needs k and the positive class
            threshold = {}
            if hasattr(args, 'threshold') and args.threshold is not None:
                threshold.update(k=args.threshold)
            if hasattr(args, 'threshold_class') \
                    and args.threshold_class is not None:
                threshold.update({"class": args.threshold_class})
            batch_prediction_args.update(threshold=threshold)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_prediction_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})
    if args.prediction_info in [NORMAL_FORMAT, FULL_FORMAT]:
        # boosted ensembles report probability instead of confidence
        if (hasattr(args, 'boosting') and args.boosting) or \
                (hasattr(args, 'probability') and args.probability):
            batch_prediction_args.update(probability=True)
        else:
            batch_prediction_args.update(confidence=True)
    if args.prediction_info == FULL_FORMAT:
        batch_prediction_args.update(all_fields=True)
        if hasattr(args, 'prediction_name') and args.prediction_name:
            batch_prediction_args.update(prediction_name=args.prediction_name)
    if args.prediction_fields:
        batch_prediction_args.update(all_fields=False)
        prediction_fields = []
        for field in args.prediction_fields.split(args.args_separator):
            field = field.strip()
            # accept either field ids or field names
            if not field in dataset_fields.fields:
                try:
                    field = dataset_fields.field_id(field)
                # "as" form replaces the deprecated "except E, e" syntax
                except ValueError as exc:
                    sys.exit(exc)
            prediction_fields.append(field)
        batch_prediction_args.update(output_fields=prediction_fields)
    if hasattr(args, 'missing_strategy') and args.missing_strategy:
        batch_prediction_args.update(missing_strategy=args.missing_strategy)
    if hasattr(args, "operating_point_") and args.operating_point_:
        batch_prediction_args.update(operating_point=args.operating_point_)
        if args.operating_point_.get("kind") == "probability":
            batch_prediction_args.update({"probability": True,
                                          "confidence": False})
    # user-provided JSON args override everything set above
    if 'batch_prediction' in args.json_args:
        update_json_args(
            batch_prediction_args,
            args.json_args.get('batch_prediction'),
            fields)
    return batch_prediction_args
def create_batch_prediction(model_or_ensemble, test_dataset,
                            batch_prediction_args, args,
                            api=None, session_file=None,
                            path=None, log=None):
    """Creates remote batch_prediction and waits until it finishes.

    :param model_or_ensemble: id of the model/ensemble used to predict
    :param test_dataset: id of the dataset with the test input data
    :param batch_prediction_args: dict of creation arguments
    :param args: command-line args object (verbosity, reports, ...)
    :param api: BigML API connection (created on the fly when None)
    :param session_file: path of the session log file
    :param path: output directory where resource ids are logged
    :param log: resumable log file handler
    :return: the finished batch prediction resource
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch prediction.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_prediction = api.create_batch_prediction(model_or_ensemble,
                                                   test_dataset,
                                                   batch_prediction_args,
                                                   retries=None)
    log_created_resources("batch_prediction", path,
                          bigml.api.get_batch_prediction_id(batch_prediction),
                          mode='a')
    batch_prediction_id = check_resource_error(
        batch_prediction, "Failed to create batch prediction: ")
    try:
        batch_prediction = check_resource(batch_prediction,
                                          api.get_batch_prediction)
    # `except ... as ...` works on Python 2.6+ and 3; the comma form is
    # Python 2 only and long deprecated (PEP 3110)
    except ValueError as exception:
        sys.exit("Failed to get a finished batch prediction: %s"
                 % str(exception))
    message = dated("Batch prediction created: %s\n"
                    % get_url(batch_prediction))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_prediction_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_prediction)
    return batch_prediction
def set_cluster_args(args, name=None, fields=None,
                     cluster_fields=None):
    """Build the arguments dict used to create a cluster resource.

    Falls back to the values parsed from the command line (``args``)
    whenever ``name`` or ``cluster_fields`` are not given explicitly.
    """
    name = args.name if name is None else name
    cluster_fields = (args.cluster_fields_ if cluster_fields is None
                      else cluster_fields)
    cluster_args = set_basic_model_args(args, name)
    # default to the deterministic SEED when no user seed was provided
    cluster_args["seed"] = args.seed if args.seed is not None else SEED
    cluster_args["cluster_seed"] = (args.cluster_seed
                                    if args.cluster_seed is not None
                                    else SEED)
    if args.cluster_models is not None:
        cluster_args["model_clusters"] = True
    if args.cluster_k:
        cluster_args["k"] = args.cluster_k
    if cluster_fields and fields is not None:
        cluster_args.update(
            input_fields=configure_input_fields(fields, cluster_fields))
    if args.summary_fields is not None:
        cluster_args["summary_fields"] = args.summary_fields_
    cluster_args = update_sample_parameters_args(cluster_args, args)
    # user-provided JSON overrides take precedence over everything else
    if 'cluster' in args.json_args:
        update_json_args(cluster_args, args.json_args.get('cluster'), fields)
    return cluster_args
def create_clusters(datasets, cluster_ids, cluster_args,
                    args, api=None, path=None,
                    session_file=None, log=None):
    """Create remote clusters and wait until they finish.

    :param datasets: list of dataset ids used as training data
    :param cluster_ids: ids of clusters already created (when resuming)
    :param cluster_args: dict (or list of dicts) of creation arguments
    :param args: command-line args object (parallelism, verbosity, ...)
    :param api: BigML API connection (created on the fly when None)
    :param path: output directory where resource ids are logged
    :param session_file: path of the session log file
    :param log: resumable log file handler
    :return: (clusters, cluster_ids) with the created resources and ids
    """
    if api is None:
        api = bigml.api.BigML()
    clusters = cluster_ids[:]
    existing_clusters = len(clusters)
    cluster_args_list = []
    datasets = datasets[existing_clusters:]
    # if resuming and all clusters were created, there will be no datasets left
    if datasets:
        if isinstance(cluster_args, list):
            cluster_args_list = cluster_args
        # Only one cluster per command, at present
        number_of_clusters = 1
        message = dated("Creating %s.\n" %
                        plural("cluster", number_of_clusters))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_clusters):
            # throttle creation so we never exceed the allowed parallel tasks
            wait_for_available_tasks(inprogress, args.max_parallel_clusters,
                                     api, "cluster")
            if cluster_args_list:
                cluster_args = cluster_args_list[i]
            cluster = api.create_cluster(datasets, cluster_args, retries=None)
            cluster_id = check_resource_error(cluster,
                                              "Failed to create cluster: ")
            log_message("%s\n" % cluster_id, log_file=log)
            cluster_ids.append(cluster_id)
            inprogress.append(cluster_id)
            clusters.append(cluster)
            log_created_resources("clusters", path, cluster_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(cluster)['code'] != bigml.api.FINISHED:
                try:
                    cluster = check_resource(cluster, api.get_cluster,
                                             query_string=query_string)
                # `except ... as ...`: Python 2.6+/3-compatible syntax
                except ValueError as exception:
                    sys.exit("Failed to get a finished cluster: %s" %
                             str(exception))
                clusters[0] = cluster
            message = dated("Cluster created: %s\n" %
                            get_url(cluster))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, cluster)
    return clusters, cluster_ids
def get_clusters(cluster_ids, args, api=None, session_file=None):
    """Retrieves remote clusters in their actual status.

    :param cluster_ids: ids of the clusters to retrieve
    :param args: command-line args object (verbosity, export_fields, ...)
    :param api: BigML API connection (created on the fly when None)
    :param session_file: path of the session log file
    :return: (clusters, cluster_ids) with the first cluster refreshed
    """
    if api is None:
        api = bigml.api.BigML()
    clusters = cluster_ids
    cluster_id = cluster_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("cluster", len(cluster_ids)),
                     get_url(cluster_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one cluster to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        cluster = check_resource(cluster_ids[0], api.get_cluster,
                                 query_string=query_string)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished cluster: %s" % str(exception))
    clusters[0] = cluster
    return clusters, cluster_ids
def set_batch_centroid_args(args, fields=None,
                            dataset_fields=None):
    """Return batch centroid args dict.

    :param args: command-line args object
    :param fields: Fields structure of the cluster
    :param dataset_fields: Fields structure of the test dataset (falls
        back to ``fields`` when None)
    """
    batch_centroid_args = set_basic_batch_args(args, args.name)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_centroid_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})
    if args.prediction_info == FULL_FORMAT:
        batch_centroid_args.update(all_fields=True)
    if args.prediction_fields:
        batch_centroid_args.update(all_fields=False)
        prediction_fields = []
        for field in args.prediction_fields.split(args.args_separator):
            field = field.strip()
            # names given by the user are translated to field ids
            if field not in dataset_fields.fields:
                try:
                    field = dataset_fields.field_id(field)
                # `except ... as ...`: Python 2.6+/3-compatible syntax
                except ValueError as exc:
                    sys.exit(exc)
            prediction_fields.append(field)
        batch_centroid_args.update(output_fields=prediction_fields)
    if 'batch_centroid' in args.json_args:
        update_json_args(
            batch_centroid_args, args.json_args.get('batch_centroid'), fields)
    return batch_centroid_args
def create_batch_centroid(cluster, test_dataset,
                          batch_centroid_args, args,
                          api=None, session_file=None,
                          path=None, log=None):
    """Creates remote batch_centroid and waits until it finishes.

    :param cluster: id of the cluster used to assign centroids
    :param test_dataset: id of the dataset with the test input data
    :param batch_centroid_args: dict of creation arguments
    :param args: command-line args object (verbosity, reports, ...)
    :param api: BigML API connection (created on the fly when None)
    :param session_file: path of the session log file
    :param path: output directory where resource ids are logged
    :param log: resumable log file handler
    :return: the finished batch centroid resource
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch centroid.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_centroid = api.create_batch_centroid(cluster,
                                               test_dataset,
                                               batch_centroid_args,
                                               retries=None)
    log_created_resources("batch_centroid", path,
                          bigml.api.get_batch_centroid_id(batch_centroid),
                          mode='a')
    # fixed copy-paste bug: the message used to say "batch prediction"
    batch_centroid_id = check_resource_error(
        batch_centroid, "Failed to create batch centroid: ")
    try:
        batch_centroid = check_resource(batch_centroid,
                                        api.get_batch_centroid)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished batch centroid: %s"
                 % str(exception))
    message = dated("Batch centroid created: %s\n"
                    % get_url(batch_centroid))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_centroid_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_centroid)
    return batch_centroid
def set_publish_cluster_args(args):
    """Build the sharing options dict used to publish a cluster.

    Returns an empty dict when the cluster is to remain private.
    """
    if not args.public_cluster:
        return {}
    public_cluster = {"private": False}
    if args.model_price:
        public_cluster["price"] = args.model_price
    if args.cpp:
        public_cluster["credits_per_prediction"] = args.cpp
    return public_cluster
def update_cluster(cluster, cluster_args, args,
                   api=None, path=None, session_file=None):
    """Update the properties of a remote cluster and wait for completion.

    Logs progress to the session file and, when sharing is enabled,
    also logs the shared link.
    """
    api = bigml.api.BigML() if api is None else api
    log_message(dated("Updating cluster. %s\n" % get_url(cluster)),
                log_file=session_file, console=args.verbosity)
    cluster = api.update_cluster(cluster, cluster_args)
    check_resource_error(cluster, "Failed to update cluster: %s"
                         % cluster['resource'])
    cluster = check_resource(cluster, api.get_cluster, query_string=FIELDS_QS)
    if is_shared(cluster):
        shared_message = dated("Shared cluster link. %s\n" %
                               get_url(cluster, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
    if args.reports:
        report(args.reports, path, cluster)
    return cluster
def set_batch_anomaly_score_args(args, fields=None,
                                 dataset_fields=None):
    """Return batch anomaly score args dict.

    :param args: command-line args object
    :param fields: Fields structure of the anomaly detector
    :param dataset_fields: Fields structure of the test dataset (falls
        back to ``fields`` when None)
    """
    batch_anomaly_score_args = set_basic_batch_args(args, args.name)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_anomaly_score_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})
    if args.prediction_info == FULL_FORMAT:
        batch_anomaly_score_args.update(all_fields=True)
    if args.prediction_fields:
        batch_anomaly_score_args.update(all_fields=False)
        prediction_fields = []
        for field in args.prediction_fields.split(args.args_separator):
            field = field.strip()
            # names given by the user are translated to field ids
            if field not in dataset_fields.fields:
                try:
                    field = dataset_fields.field_id(field)
                # `except ... as ...`: Python 2.6+/3-compatible syntax
                except ValueError as exc:
                    sys.exit(exc)
            prediction_fields.append(field)
        batch_anomaly_score_args.update(output_fields=prediction_fields)
    if 'batch_anomaly_score' in args.json_args:
        update_json_args(
            batch_anomaly_score_args,
            args.json_args.get('batch_anomaly_score'),
            fields)
    return batch_anomaly_score_args
def set_anomaly_args(args, name=None, fields=None, anomaly_fields=None):
    """Build the arguments dict used to create an anomaly detector.

    Falls back to the values parsed from the command line (``args``)
    whenever ``name`` or ``anomaly_fields`` are not given explicitly.
    """
    name = args.name if name is None else name
    anomaly_fields = (args.anomaly_fields_ if anomaly_fields is None
                      else anomaly_fields)
    anomaly_args = set_basic_model_args(args, name)
    # default to the deterministic SEED when no user seed was provided
    anomaly_args["seed"] = args.seed if args.seed is not None else SEED
    anomaly_args["anomaly_seed"] = (args.anomaly_seed
                                    if args.anomaly_seed is not None
                                    else SEED)
    if anomaly_fields and fields is not None:
        anomaly_args.update(
            input_fields=configure_input_fields(fields, anomaly_fields))
    if args.top_n > 0:
        anomaly_args["top_n"] = args.top_n
    if args.forest_size > 0:
        anomaly_args["forest_size"] = args.forest_size
    anomaly_args = update_sample_parameters_args(anomaly_args, args)
    # user-provided JSON overrides take precedence over everything else
    if 'anomaly' in args.json_args:
        update_json_args(anomaly_args, args.json_args.get('anomaly'), fields)
    return anomaly_args
def set_publish_anomaly_args(args):
    """Build the sharing options dict used to publish an anomaly detector.

    Returns an empty dict when the anomaly detector is to remain private.
    """
    if not args.public_anomaly:
        return {}
    public_anomaly = {"private": False}
    if args.model_price:
        public_anomaly["price"] = args.model_price
    if args.cpp:
        public_anomaly["credits_per_prediction"] = args.cpp
    return public_anomaly
def create_anomalies(datasets, anomaly_ids, anomaly_args,
                     args, api=None, path=None,
                     session_file=None, log=None):
    """Create remote anomaly detectors and wait until they finish.

    :param datasets: list of dataset ids used as training data
    :param anomaly_ids: ids of anomalies already created (when resuming)
    :param anomaly_args: dict (or list of dicts) of creation arguments
    :param args: command-line args object (parallelism, verbosity, ...)
    :param api: BigML API connection (created on the fly when None)
    :param path: output directory where resource ids are logged
    :param session_file: path of the session log file
    :param log: resumable log file handler
    :return: (anomalies, anomaly_ids) with the created resources and ids
    """
    if api is None:
        api = bigml.api.BigML()
    anomalies = anomaly_ids[:]
    existing_anomalies = len(anomalies)
    anomaly_args_list = []
    datasets = datasets[existing_anomalies:]
    # if resuming and all anomalies were created,
    # there will be no datasets left
    if datasets:
        if isinstance(anomaly_args, list):
            anomaly_args_list = anomaly_args
        # Only one anomaly per command, at present
        number_of_anomalies = 1
        message = dated("Creating %s.\n" %
                        plural("anomaly detector", number_of_anomalies))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_anomalies):
            # throttle creation so we never exceed the allowed parallel tasks
            wait_for_available_tasks(inprogress, args.max_parallel_anomalies,
                                     api, "anomaly")
            if anomaly_args_list:
                anomaly_args = anomaly_args_list[i]
            anomaly = api.create_anomaly(datasets, anomaly_args, retries=None)
            anomaly_id = check_resource_error(anomaly,
                                              "Failed to create anomaly: ")
            log_message("%s\n" % anomaly_id, log_file=log)
            anomaly_ids.append(anomaly_id)
            inprogress.append(anomaly_id)
            anomalies.append(anomaly)
            log_created_resources("anomalies", path, anomaly_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(anomaly)['code'] != bigml.api.FINISHED:
                try:
                    anomaly = api.check_resource(anomaly,
                                                 query_string=query_string)
                # `except ... as ...`: Python 2.6+/3-compatible syntax
                except ValueError as exception:
                    sys.exit("Failed to get a finished anomaly: %s" %
                             str(exception))
                anomalies[0] = anomaly
            message = dated("Anomaly created: %s\n" %
                            get_url(anomaly))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, anomaly)
    return anomalies, anomaly_ids
def get_anomalies(anomaly_ids, args, api=None, session_file=None):
    """Retrieves remote anomaly detectors in their actual status.

    :param anomaly_ids: ids of the anomaly detectors to retrieve
    :param args: command-line args object (verbosity, export_fields, ...)
    :param api: BigML API connection (created on the fly when None)
    :param session_file: path of the session log file
    :return: (anomalies, anomaly_ids) with the first anomaly refreshed
    """
    if api is None:
        api = bigml.api.BigML()
    anomalies = anomaly_ids
    anomaly_id = anomaly_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("anomaly detector", len(anomaly_ids)),
                     get_url(anomaly_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one anomaly to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        anomaly = api.check_resource(anomaly_ids[0],
                                     query_string=query_string)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished anomaly: %s" % str(exception))
    anomalies[0] = anomaly
    return anomalies, anomaly_ids
def create_batch_anomaly_score(anomaly, test_dataset,
                               batch_anomaly_score_args, args,
                               api=None, session_file=None,
                               path=None, log=None):
    """Creates remote batch anomaly score and waits until it finishes.

    :param anomaly: id of the anomaly detector used to score
    :param test_dataset: id of the dataset with the test input data
    :param batch_anomaly_score_args: dict of creation arguments
    :param args: command-line args object (verbosity, reports, ...)
    :param api: BigML API connection (created on the fly when None)
    :param session_file: path of the session log file
    :param path: output directory where resource ids are logged
    :param log: resumable log file handler
    :return: the finished batch anomaly score resource
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch anomaly score.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_anomaly_score = api.create_batch_anomaly_score(
        anomaly, test_dataset, batch_anomaly_score_args, retries=None)
    log_created_resources(
        "batch_anomaly_score", path,
        bigml.api.get_batch_anomaly_score_id(batch_anomaly_score),
        mode='a')
    # fixed copy-paste bug: the message used to say "batch prediction"
    batch_anomaly_score_id = check_resource_error(
        batch_anomaly_score, "Failed to create batch anomaly score: ")
    try:
        batch_anomaly_score = api.check_resource(batch_anomaly_score)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished batch anomaly score: %s"
                 % str(exception))
    message = dated("Batch anomaly score created: %s\n"
                    % get_url(batch_anomaly_score))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_anomaly_score_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_anomaly_score)
    return batch_anomaly_score
def update_anomaly(anomaly, anomaly_args, args,
                   api=None, path=None, session_file=None):
    """Update the properties of a remote anomaly detector.

    Waits for the update to finish; logs the shared link when sharing
    is enabled.
    """
    api = bigml.api.BigML() if api is None else api
    log_message(dated("Updating anomaly detector. %s\n" % get_url(anomaly)),
                log_file=session_file, console=args.verbosity)
    anomaly = api.update_anomaly(anomaly, anomaly_args)
    check_resource_error(anomaly, "Failed to update anomaly: %s"
                         % anomaly['resource'])
    anomaly = api.check_resource(anomaly, query_string=FIELDS_QS)
    if is_shared(anomaly):
        shared_message = dated("Shared anomaly link. %s\n" %
                               get_url(anomaly, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
    if args.reports:
        report(args.reports, path, anomaly)
    return anomaly
def set_project_args(args, name=None):
    """Build the arguments dict used to create a project."""
    project_args = set_basic_args(args, args.name if name is None else name)
    # user-provided JSON overrides take precedence
    if 'project' in args.json_args:
        update_json_args(project_args, args.json_args.get('project'), None)
    return project_args
def create_project(project_args, args, api=None,
                   session_file=None, path=None, log=None):
    """Creates remote project and waits until it finishes.

    :param project_args: dict of creation arguments
    :param args: command-line args object (verbosity, reports, ...)
    :param api: BigML API connection (created on the fly when None)
    :param session_file: path of the session log file
    :param path: output directory where resource ids are logged
    :param log: resumable log file handler
    :return: the finished project resource
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating project.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    project = api.create_project(project_args)
    log_created_resources("project", path,
                          bigml.api.get_project_id(project), mode='a')
    project_id = check_resource_error(project, "Failed to create project: ")
    try:
        project = check_resource(project, api=api)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished project: %s" % str(exception))
    message = dated("Project \"%s\" has been created.\n" %
                    project['object']['name'])
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % project_id, log_file=log)
    # some callers pass an args object with no `reports` attribute, so
    # reporting is best-effort here
    try:
        if args.reports:
            report(args.reports, path, project)
    except AttributeError:
        pass
    return project
def update_project(project_args, args,
                   api=None, session_file=None, log=None):
    """Update the attributes of the project referenced by ``args.project_id``."""
    api = bigml.api.BigML() if api is None else api
    log_message(dated("Updating project attributes.\n"),
                log_file=session_file, console=args.verbosity)
    project = api.update_project(args.project_id, project_args)
    check_resource_error(project, "Failed to update project: %s"
                         % project['resource'])
    updated_message = dated("Project \"%s\" has been updated.\n" %
                            project['resource'])
    log_message(updated_message, log_file=session_file,
                console=args.verbosity)
    log_message("%s\n" % args.project_id, log_file=log)
    return project
def get_project_by_name(project, api=None, verbosity=True, session_file=None):
    """Look up a project id by its name.

    Returns the id of the first project whose name matches, or None
    when no match is found.
    """
    if api is None:
        api = bigml.api.BigML()
    project_id = None
    needs_lookup = (isinstance(project, basestring) or
                    bigml.api.get_status(project)['code'] !=
                    bigml.api.FINISHED)
    if needs_lookup:
        log_message(dated("Retrieving project info.\n"),
                    log_file=session_file, console=verbosity)
        matches = api.list_projects(
            query_string="name=%s" % project).get('objects', [])
        if matches:
            project_id = matches[0]['resource']
    return project_id
def set_sample_args(args, name=None):
    """Build the arguments dict used to create a sample."""
    sample_args = set_basic_args(args, args.name if name is None else name)
    if 'sample' in args.json_args:
        # NOTE(review): unlike the other set_*_args helpers this call does
        # not pass a `fields` argument — presumably update_json_args
        # defaults it; confirm.
        update_json_args(sample_args, args.json_args.get('sample'))
    return sample_args
def create_samples(datasets, sample_ids, sample_args,
                   args, api=None, path=None,
                   session_file=None, log=None):
    """Create remote samples and wait until they finish.

    :param datasets: list of dataset ids used as data source
    :param sample_ids: ids of samples already created (when resuming)
    :param sample_args: dict (or list of dicts) of creation arguments
    :param args: command-line args object (verbosity, reports, ...)
    :param api: BigML API connection (created on the fly when None)
    :param path: output directory where resource ids are logged
    :param session_file: path of the session log file
    :param log: resumable log file handler
    :return: (samples, sample_ids) with the created resources and ids
    """
    if api is None:
        api = bigml.api.BigML()
    samples = sample_ids[:]
    existing_samples = len(samples)
    sample_args_list = []
    datasets = datasets[existing_samples:]
    # if resuming and all samples were created, there will be no datasets left
    if datasets:
        if isinstance(sample_args, list):
            sample_args_list = sample_args
        # Only one sample per command, at present
        number_of_samples = 1
        max_parallel_samples = 1
        message = dated("Creating %s.\n" %
                        plural("sample", number_of_samples))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_samples):
            # throttle creation so we never exceed the allowed parallel tasks
            wait_for_available_tasks(inprogress, max_parallel_samples,
                                     api, "sample")
            if sample_args_list:
                sample_args = sample_args_list[i]
            sample = api.create_sample(datasets[i], sample_args, retries=None)
            sample_id = check_resource_error(sample,
                                             "Failed to create sample: ")
            log_message("%s\n" % sample_id, log_file=log)
            sample_ids.append(sample_id)
            inprogress.append(sample_id)
            samples.append(sample)
            log_created_resources("samples", path, sample_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(sample)['code'] != bigml.api.FINISHED:
                try:
                    sample = check_resource(sample, api.get_sample)
                # `except ... as ...`: Python 2.6+/3-compatible syntax
                except ValueError as exception:
                    sys.exit("Failed to get a finished sample: %s" %
                             str(exception))
                samples[0] = sample
            message = dated("Sample created: %s\n" %
                            get_url(sample))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, sample)
    return samples, sample_ids
def update_sample(sample, sample_args, args,
                  api=None, path=None, session_file=None):
    """Update the properties of a remote sample and wait for completion.

    Logs the shared link when sharing is enabled.
    """
    api = bigml.api.BigML() if api is None else api
    log_message(dated("Updating sample. %s\n" % get_url(sample)),
                log_file=session_file, console=args.verbosity)
    sample = api.update_sample(sample, sample_args)
    check_resource_error(sample, "Failed to update sample: %s"
                         % sample['resource'])
    sample = check_resource(sample, api.get_sample)
    if is_shared(sample):
        shared_message = dated("Shared sample link. %s\n" %
                               get_url(sample, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
    if args.reports:
        report(args.reports, path, sample)
    return sample
def get_samples(sample_ids, args,
                api=None, session_file=None, query_string=''):
    """Retrieves remote samples in their actual status.

    :param sample_ids: ids of the samples to retrieve
    :param args: command-line args object (verbosity, ...)
    :param api: BigML API connection (created on the fly when None)
    :param session_file: path of the session log file
    :param query_string: extra query string for the GET calls
    :return: (samples, sample_ids) with the first sample refreshed
    """
    if api is None:
        api = bigml.api.BigML()
    samples = sample_ids
    sample_id = sample_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("sample", len(sample_ids)),
                     get_url(sample_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one sample to predict at present
    try:
        sample = api.get_sample(sample_ids[0],
                                query_string=query_string)
        # fixed message: this path retrieves a sample, it does not create one
        check_resource_error(sample, "Failed to get sample: %s"
                             % sample['resource'])
        sample = check_resource(sample, api=api, query_string=query_string)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished sample: %s" % str(exception))
    samples[0] = sample
    return samples, sample_ids
def set_publish_sample_args(args):
    """Build the sharing options dict used to publish a sample.

    Returns an empty dict when the sample is to remain private.
    """
    return {"private": False} if args.public_sample else {}
def set_association_args(args, name=None, fields=None,
                         association_fields=None):
    """Build the arguments dict used to create an association.

    Falls back to the values parsed from the command line (``args``)
    whenever ``name`` or ``association_fields`` are not given explicitly.
    """
    name = args.name if name is None else name
    association_fields = (args.association_fields_
                          if association_fields is None
                          else association_fields)
    association_args = set_basic_model_args(args, name)
    if association_fields and fields is not None:
        association_args.update(
            input_fields=configure_input_fields(fields, association_fields))
    if args.association_k:
        association_args["max_k"] = args.association_k
    if args.search_strategy:
        association_args["search_strategy"] = args.search_strategy
    association_args = update_sample_parameters_args(association_args, args)
    # user-provided JSON overrides take precedence over everything else
    if 'association' in args.json_args:
        update_json_args(association_args,
                         args.json_args.get('association'), fields)
    return association_args
def create_associations(datasets, association_ids, association_args,
                        args, api=None, path=None,
                        session_file=None, log=None):
    """Create remote associations and wait until they finish.

    :param datasets: list of dataset ids used as training data
    :param association_ids: ids of associations already created (resuming)
    :param association_args: dict (or list of dicts) of creation arguments
    :param args: command-line args object (parallelism, verbosity, ...)
    :param api: BigML API connection (created on the fly when None)
    :param path: output directory where resource ids are logged
    :param session_file: path of the session log file
    :param log: resumable log file handler
    :return: (associations, association_ids) with the created resources
    """
    if api is None:
        api = bigml.api.BigML()
    associations = association_ids[:]
    existing_associations = len(associations)
    association_args_list = []
    datasets = datasets[existing_associations:]
    # if resuming and all associations were created,
    # there will be no datasets left
    if datasets:
        if isinstance(association_args, list):
            association_args_list = association_args
        # Only one association per command, at present
        number_of_associations = 1
        message = dated("Creating %s.\n" %
                        plural("association", number_of_associations))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_associations):
            # throttle creation so we never exceed the allowed parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_associations,
                                     api, "association")
            if association_args_list:
                association_args = association_args_list[i]
            association = api.create_association(
                datasets, association_args, retries=None)
            association_id = check_resource_error( \
                association, "Failed to create association: ")
            log_message("%s\n" % association_id, log_file=log)
            association_ids.append(association_id)
            inprogress.append(association_id)
            associations.append(association)
            log_created_resources( \
                "associations", path, association_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(association)['code'] != bigml.api.FINISHED:
                try:
                    association = check_resource( \
                        association, api.get_association,
                        query_string=query_string)
                # `except ... as ...`: Python 2.6+/3-compatible syntax
                except ValueError as exception:
                    sys.exit("Failed to get a finished association: %s" %
                             str(exception))
                associations[0] = association
            message = dated("Association created: %s\n" %
                            get_url(association))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, association)
    return associations, association_ids
def get_associations(association_ids, args, api=None, session_file=None):
    """Retrieves remote associations in their actual status.

    :param association_ids: ids of the associations to retrieve
    :param args: command-line args object (verbosity, ...)
    :param api: BigML API connection (created on the fly when None)
    :param session_file: path of the session log file
    :return: (associations, association_ids) with the first one refreshed
    """
    if api is None:
        api = bigml.api.BigML()
    associations = association_ids
    association_id = association_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("association", len(association_ids)),
                     get_url(association_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one association to predict at present
    try:
        query_string = FIELDS_QS
        association = check_resource(association_ids[0], api.get_association,
                                     query_string=query_string)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished association: %s" % str(exception))
    associations[0] = association
    return associations, association_ids
def set_publish_association_args(args):
    """Build the sharing options dict used to publish an association.

    Returns an empty dict when the association is to remain private.
    """
    if not args.public_association:
        return {}
    public_association = {"private": False}
    if args.model_price:
        public_association["price"] = args.model_price
    if args.cpp:
        public_association["credits_per_prediction"] = args.cpp
    return public_association
def update_association(association, association_args, args,
                       api=None, path=None, session_file=None):
    """Update the properties of a remote association.

    Waits for the update to finish; logs the shared link when sharing
    is enabled.
    """
    api = bigml.api.BigML() if api is None else api
    log_message(dated("Updating association. %s\n" % get_url(association)),
                log_file=session_file, console=args.verbosity)
    association = api.update_association(association, association_args)
    check_resource_error(association, "Failed to update association: %s"
                         % association['resource'])
    association = check_resource(association,
                                 api.get_association, query_string=FIELDS_QS)
    if is_shared(association):
        shared_message = dated("Shared association link. %s\n" %
                               get_url(association, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
    if args.reports:
        report(args.reports, path, association)
    return association
def set_script_args(args, name=None):
    """Build the arguments dict used to create a whizzml script."""
    script_args = set_basic_args(args, args.name if name is None else name)
    if args.project_id is not None:
        script_args["project"] = args.project_id
    # the trailing-underscore attributes hold the parsed versions of the
    # corresponding raw command-line options
    if args.imports is not None:
        script_args["imports"] = args.imports_
    if args.parameters_ is not None:
        script_args["inputs"] = args.parameters_
    if args.declare_outputs_:
        script_args["outputs"] = args.declare_outputs_
    update_attributes(script_args, args.json_args.get('script'))
    return script_args
def create_script(source_code, script_args, args, api=None, path=None,
                  session_file=None, log=None):
    """Creates remote script and waits until it compiles.

    :param source_code: whizzml source code (or script id) to compile
    :param script_args: dict of creation arguments
    :param args: command-line args object (verbosity, ...)
    :param api: BigML API connection (created on the fly when None)
    :param path: output directory where resource ids are logged
    :param session_file: path of the session log file
    :param log: resumable log file handler
    :return: the compiled script resource
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating script \"%s\".\n" % script_args["name"])
    log_message(message, log_file=session_file, console=args.verbosity)
    script = api.create_script(source_code, script_args)
    log_created_resources("scripts", path,
                          bigml.api.get_script_id(script), mode='a')
    script_id = check_resource_error(script, "Failed to create script: ")
    try:
        script = check_resource(script, api.get_script)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a compiled script: %s" % str(exception))
    message = dated("Script created: %s\n" % get_url(script))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % script_id, log_file=log)
    return script
def get_script(script, api=None, verbosity=True,
               session_file=None):
    """Retrieves the script in its actual state.

    :param script: script id or (possibly unfinished) script resource
    :param api: BigML API connection (created on the fly when None)
    :param verbosity: whether to echo progress to the console
    :param session_file: path of the session log file
    :return: the compiled script resource
    """
    if api is None:
        api = bigml.api.BigML()
    # refresh only when we hold a bare id or an unfinished resource
    if (isinstance(script, basestring) or
            bigml.api.get_status(script)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving script. %s\n" %
                        get_url(script))
        log_message(message, log_file=session_file,
                    console=verbosity)
        try:
            script = check_resource(script, api.get_script)
        # `except ... as ...`: Python 2.6+/3-compatible syntax
        except ValueError as exception:
            sys.exit("Failed to get a compiled script: %s" % str(exception))
    return script
def set_execution_args(args, name=None):
    """Build the arguments dict used to create a whizzml execution."""
    execution_args = set_basic_args(args, args.name if name is None else name)
    if args.project_id is not None:
        execution_args["project"] = args.project_id
    if args.arguments_:
        execution_args["inputs"] = args.arguments_
    if args.creation_defaults is not None:
        execution_args["creation_defaults"] = args.creation_defaults_
    if args.outputs_:
        execution_args["outputs"] = args.outputs_
    if args.input_maps_:
        # NOTE(review): the trailing underscore in the "input_maps_" key
        # looks like a typo for "input_maps" — confirm against the API docs
        execution_args["input_maps_"] = args.input_maps_
    update_attributes(execution_args, args.json_args.get('execution'))
    return execution_args
def create_execution(execution_args, args, api=None, path=None,
                     session_file=None, log=None):
    """Creates remote execution and waits until it finishes.

    :param execution_args: dict of creation arguments
    :param args: command-line args object (script ids, verbosity, ...)
    :param api: BigML API connection (created on the fly when None)
    :param path: output directory where resource ids are logged
    :param session_file: path of the session log file
    :param log: resumable log file handler
    :return: the finished execution resource
    """
    # bug fix: every sibling helper defaults api when None; this one
    # dereferenced a None api and raised AttributeError
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating execution.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    scripts = args.script_ids if args.script_ids else args.script
    execution = api.create_execution(scripts, execution_args)
    log_created_resources("execution", path,
                          bigml.api.get_execution_id(execution), mode='a')
    execution_id = check_resource_error(execution,
                                        "Failed to create execution: ")
    try:
        execution = check_resource(execution, api.get_execution)
    # `except ... as ...`: Python 2.6+/3-compatible syntax
    except ValueError as exception:
        sys.exit("Failed to get a finished execution: %s" % str(exception))
    message = dated("Execution created: %s\n" % get_url(execution))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % execution_id, log_file=log)
    return execution
def get_execution(execution, api=None, verbosity=True,
                  session_file=None):
    """Retrieves the execution in its actual state.

    :param execution: execution id or (possibly unfinished) resource
    :param api: BigML API connection (created on the fly when None)
    :param verbosity: whether to echo progress to the console
    :param session_file: path of the session log file
    :return: the finished execution resource
    """
    if api is None:
        api = bigml.api.BigML()
    # refresh only when we hold a bare id or an unfinished resource
    if (isinstance(execution, basestring) or
            bigml.api.get_status(execution)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving execution. %s\n" %
                        get_url(execution))
        log_message(message, log_file=session_file,
                    console=verbosity)
        try:
            execution = check_resource(execution, api.get_execution)
        # `except ... as ...`: Python 2.6+/3-compatible syntax
        except ValueError as exception:
            sys.exit("Failed to get a finished execution: %s" % str(exception))
    return execution
def set_logistic_regression_args(args, name=None, fields=None,
                                 objective_id=None,
                                 logistic_regression_fields=None):
    """Return logistic regression arguments dict.

    Builds the creation payload from the command-line args, filling in
    name, objective field, input fields, sampling options and the
    model-specific parameters (c, bias, eps, normalize, ...).
    NOTE: when evaluating or cross-validating, this function also
    mutates ``args.sample_rate`` and ``args.replacement`` in place, so
    the order of the blocks below matters.
    """
    if name is None:
        name = args.name
    if logistic_regression_fields is None:
        logistic_regression_fields = args.logistic_regression_fields_
    if objective_id is None:
        objective_id = args.objective_id_
    logistic_regression_args = set_basic_model_args(args, name)
    # deterministic SEED unless the user supplied one
    logistic_regression_args.update({
        "seed": SEED if args.seed is None else args.seed
    })
    if objective_id is not None and fields is not None:
        logistic_regression_args.update({"objective_field": objective_id})
    if logistic_regression_fields and fields is not None:
        input_fields = configure_input_fields(fields,
                                              logistic_regression_fields)
        logistic_regression_args.update(input_fields=input_fields)
    # evaluation without an explicit test split (or cross-validation)
    # needs a deterministic sample of the training data
    if ((args.evaluate and args.test_split == 0 and args.test_datasets is None)
            or args.cross_validation_rate > 0):
        logistic_regression_args.update(seed=SEED)
        if args.cross_validation_rate > 0:
            # hold out the cross-validation fraction
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
              and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE
        logistic_regression_args.update({"sample_rate": args.sample_rate})
    if args.lr_c:
        logistic_regression_args.update({"c": args.lr_c})
    logistic_regression_args.update({"bias": args.bias})
    logistic_regression_args.update( \
        {"balance_fields": args.balance_fields})
    if args.eps:
        logistic_regression_args.update({"eps": args.eps})
    if args.normalize is not None:
        logistic_regression_args.update({"normalize": args.normalize})
    if args.missing_numerics is not None:
        logistic_regression_args.update( \
            {"missing_numerics": args.missing_numerics})
    if args.field_codings is not None:
        logistic_regression_args.update(\
            {"field_codings": args.field_codings_})
    logistic_regression_args = update_sample_parameters_args( \
        logistic_regression_args, args)
    # user-provided JSON overrides take precedence over everything else
    if 'logistic_regression' in args.json_args:
        update_json_args(logistic_regression_args,
                         args.json_args.get('logistic_regression'),
                         fields)
    return logistic_regression_args
def create_logistic_regressions(datasets, logistic_regression_ids,
                                logistic_regression_args,
                                args, api=None, path=None,
                                session_file=None, log=None):
    """Create remote logistic regressions.

    Resumes from the already-created resources in
    ``logistic_regression_ids`` and creates one logistic regression
    (only one per command, at present) from the remaining datasets.
    Returns the tuple (logistic_regressions, logistic_regression_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    logistic_regressions = logistic_regression_ids[:]
    existing_logistic_regressions = len(logistic_regressions)
    logistic_regression_args_list = []
    datasets = datasets[existing_logistic_regressions:]
    # if resuming and all logistic regressions were created,
    # there will be no datasets left
    if datasets:
        if isinstance(logistic_regression_args, list):
            logistic_regression_args_list = logistic_regression_args

        # Only one logistic regression per command, at present
        number_of_logistic_regressions = 1
        message = dated("Creating %s.\n" %
                        plural("logistic regression",
                               number_of_logistic_regressions))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_logistic_regressions):
            # throttle creation to the maximum number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_logistic_regressions,
                                     api, "logisticregression")
            if logistic_regression_args_list:
                logistic_regression_args = logistic_regression_args_list[i]
            if args.cross_validation_rate > 0:
                # each cross-validation round gets its own deterministic seed
                new_seed = get_basic_seed(i + existing_logistic_regressions)
                logistic_regression_args.update(seed=new_seed)

            if (args.test_datasets and args.evaluate):
                dataset = datasets[i]
                logistic_regression = api.create_logistic_regression( \
                    dataset, logistic_regression_args, retries=None)
            elif args.dataset_off and args.evaluate:
                # dataset-off evaluation: train on all datasets but one
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_logistic_regressions]
                logistic_regression = api.create_logistic_regression( \
                    multi_dataset, logistic_regression_args, retries=None)
            else:
                logistic_regression = api.create_logistic_regression( \
                    datasets, logistic_regression_args, retries=None)
            logistic_regression_id = check_resource_error( \
                logistic_regression, "Failed to create logistic regression: ")
            log_message("%s\n" % logistic_regression_id, log_file=log)
            logistic_regression_ids.append(logistic_regression_id)
            inprogress.append(logistic_regression_id)
            logistic_regressions.append(logistic_regression)
            log_created_resources("logistic_regressions",
                                  path,
                                  logistic_regression_id, mode='a')

            if args.verbosity:
                if bigml.api.get_status(logistic_regression)['code'] != \
                    bigml.api.FINISHED:
                    try:
                        logistic_regression = check_resource( \
                            logistic_regression, api.get_logistic_regression,
                            query_string=query_string)
                    # Py2/3-compatible syntax (was `except ValueError, exception`)
                    except ValueError as exception:
                        sys.exit("Failed to get a finished logistic regression:"
                                 " %s" %
                                 str(exception))
                    logistic_regressions[0] = logistic_regression
                message = dated("Logistic regression created: %s\n" %
                                get_url(logistic_regression))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, logistic_regression)

    return logistic_regressions, logistic_regression_ids
def get_logistic_regressions(logistic_regression_ids,
                             args, api=None, session_file=None):
    """Retrieve the remote logistic regressions in their actual status.

    Only the first id is fetched (one logistic regression per command,
    at present). Returns (logistic_regressions, logistic_regression_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    logistic_regressions = logistic_regression_ids
    logistic_regression_id = logistic_regression_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("logistic regression", len(logistic_regression_ids)),
                     get_url(logistic_regression_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one logistic regression to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        logistic_regression = check_resource(logistic_regression_ids[0],
                                             api.get_logistic_regression,
                                             query_string=query_string)
    # Py2/3-compatible syntax (was `except ValueError, exception`)
    except ValueError as exception:
        sys.exit("Failed to get a finished logistic regression: %s" % \
            str(exception))
    logistic_regressions[0] = logistic_regression
    return logistic_regressions, logistic_regression_ids
def set_publish_logistic_regression_args(args):
    """Build the arguments dict used to publish a logistic regression.

    Returns an empty dict when no publishing option was requested.
    """
    publish_args = {"private": False} if args.public_logistic_regression \
        else {}
    if args.model_price:
        publish_args["price"] = args.model_price
    if args.cpp:
        publish_args["credits_per_prediction"] = args.cpp
    return publish_args
def update_logistic_regression(logistic_regression, logistic_regression_args,
                               args, api=None, path=None, session_file=None):
    """Update a remote logistic regression's properties and wait until
    the updated resource is finished.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating logistic regression. %s\n" %
                      get_url(logistic_regression)),
                log_file=session_file, console=args.verbosity)
    logistic_regression = api.update_logistic_regression(
        logistic_regression, logistic_regression_args)
    check_resource_error(logistic_regression,
                         "Failed to update logistic regression: %s"
                         % logistic_regression['resource'])
    logistic_regression = check_resource(
        logistic_regression, api.get_logistic_regression,
        query_string=FIELDS_QS)
    if is_shared(logistic_regression):
        log_message(dated("Shared logistic regression link. %s\n" %
                          get_url(logistic_regression, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, logistic_regression)
    return logistic_regression
def set_linear_regression_args(args, name=None, fields=None,
                               objective_id=None,
                               linear_regression_fields=None):
    """Build the creation arguments dict for a linear regression.

    Note: may mutate ``args.sample_rate`` / ``args.replacement`` when
    cross-validation or evaluation sampling is required.
    """
    name = args.name if name is None else name
    if linear_regression_fields is None:
        linear_regression_fields = args.linear_regression_fields_
    if objective_id is None:
        objective_id = args.objective_id_

    linear_regression_args = set_basic_model_args(args, name)
    linear_regression_args["seed"] = SEED if args.seed is None else args.seed

    if objective_id is not None and fields is not None:
        linear_regression_args["objective_field"] = objective_id
    if linear_regression_fields and fields is not None:
        linear_regression_args["input_fields"] = configure_input_fields(
            fields, linear_regression_fields)

    # evaluation or cross-validation needs a deterministic sample
    evaluating = (args.evaluate and args.test_split == 0
                  and args.test_datasets is None)
    if evaluating or args.cross_validation_rate > 0:
        linear_regression_args["seed"] = SEED
        if args.cross_validation_rate > 0:
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
              and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE

    linear_regression_args["sample_rate"] = args.sample_rate
    linear_regression_args["bias"] = args.bias
    if args.field_codings is not None:
        linear_regression_args["field_codings"] = args.field_codings_

    linear_regression_args = update_sample_parameters_args(
        linear_regression_args, args)
    if 'linear_regression' in args.json_args:
        update_json_args(linear_regression_args,
                         args.json_args.get('linear_regression'),
                         fields)
    return linear_regression_args
def create_linear_regressions(datasets, linear_regression_ids,
                              linear_regression_args,
                              args, api=None, path=None,
                              session_file=None, log=None):
    """Create remote linear regressions.

    Resumes from the already-created resources in
    ``linear_regression_ids`` and creates one linear regression
    (only one per command, at present) from the remaining datasets.
    Returns the tuple (linear_regressions, linear_regression_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    linear_regressions = linear_regression_ids[:]
    existing_linear_regressions = len(linear_regressions)
    linear_regression_args_list = []
    datasets = datasets[existing_linear_regressions:]
    # if resuming and all linear regressions were created,
    # there will be no datasets left
    if datasets:
        if isinstance(linear_regression_args, list):
            linear_regression_args_list = linear_regression_args

        # Only one linear regression per command, at present
        number_of_linear_regressions = 1
        message = dated("Creating %s.\n" %
                        plural("linear regression",
                               number_of_linear_regressions))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_linear_regressions):
            # throttle creation to the maximum number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_linear_regressions,
                                     api, "linearregression")
            if linear_regression_args_list:
                linear_regression_args = linear_regression_args_list[i]
            if args.cross_validation_rate > 0:
                # each cross-validation round gets its own deterministic seed
                new_seed = get_basic_seed(i + existing_linear_regressions)
                linear_regression_args.update(seed=new_seed)

            if (args.test_datasets and args.evaluate):
                dataset = datasets[i]
                linear_regression = api.create_linear_regression( \
                    dataset, linear_regression_args, retries=None)
            elif args.dataset_off and args.evaluate:
                # dataset-off evaluation: train on all datasets but one
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_linear_regressions]
                linear_regression = api.create_linear_regression( \
                    multi_dataset, linear_regression_args, retries=None)
            else:
                linear_regression = api.create_linear_regression( \
                    datasets, linear_regression_args, retries=None)
            linear_regression_id = check_resource_error( \
                linear_regression, "Failed to create linear regression: ")
            log_message("%s\n" % linear_regression_id, log_file=log)
            linear_regression_ids.append(linear_regression_id)
            inprogress.append(linear_regression_id)
            linear_regressions.append(linear_regression)
            log_created_resources("linear_regressions",
                                  path,
                                  linear_regression_id, mode='a')

            if args.verbosity:
                if bigml.api.get_status(linear_regression)['code'] != \
                    bigml.api.FINISHED:
                    try:
                        linear_regression = check_resource( \
                            linear_regression, api.get_linear_regression,
                            query_string=query_string)
                    # Py2/3-compatible syntax (was `except ValueError, exception`)
                    except ValueError as exception:
                        sys.exit("Failed to get a finished linear regression:"
                                 " %s" %
                                 str(exception))
                    linear_regressions[0] = linear_regression
                message = dated("linear regression created: %s\n" %
                                get_url(linear_regression))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, linear_regression)

    return linear_regressions, linear_regression_ids
def get_linear_regressions(linear_regression_ids,
                           args, api=None, session_file=None):
    """Retrieve the remote linear regressions in their actual status.

    Only the first id is fetched (one linear regression per command,
    at present). Returns (linear_regressions, linear_regression_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    linear_regressions = linear_regression_ids
    linear_regression_id = linear_regression_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("linear regression", len(linear_regression_ids)),
                     get_url(linear_regression_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one linear regression to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        linear_regression = check_resource(linear_regression_ids[0],
                                           api.get_linear_regression,
                                           query_string=query_string)
    # Py2/3-compatible syntax (was `except ValueError, exception`)
    except ValueError as exception:
        sys.exit("Failed to get a finished linear regression: %s" % \
            str(exception))
    linear_regressions[0] = linear_regression
    return linear_regressions, linear_regression_ids
def set_publish_linear_regression_args(args):
    """Build the arguments dict used to publish a linear regression.

    Returns an empty dict when no publishing option was requested.
    """
    publish_args = {"private": False} if args.public_linear_regression \
        else {}
    if args.model_price:
        publish_args["price"] = args.model_price
    if args.cpp:
        publish_args["credits_per_prediction"] = args.cpp
    return publish_args
def update_linear_regression(linear_regression, linear_regression_args,
                             args, api=None, path=None, session_file=None):
    """Update a remote linear regression's properties and wait until
    the updated resource is finished.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating linear regression. %s\n" %
                      get_url(linear_regression)),
                log_file=session_file, console=args.verbosity)
    linear_regression = api.update_linear_regression(
        linear_regression, linear_regression_args)
    check_resource_error(linear_regression,
                         "Failed to update linear regression: %s"
                         % linear_regression['resource'])
    linear_regression = check_resource(
        linear_regression, api.get_linear_regression,
        query_string=FIELDS_QS)
    if is_shared(linear_regression):
        log_message(dated("Shared linear regression link. %s\n" %
                          get_url(linear_regression, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, linear_regression)
    return linear_regression
def set_time_series_args(args, name=None, fields=None,
                         objective_id=None,
                         time_series_fields=None):
    """Return time-series arguments dict.

    Builds the creation arguments for a remote time-series from the
    user options. Note: mutates ``args.range_`` when an evaluation
    range is needed and no previous split exists.
    """
    if name is None:
        name = args.name
    if objective_id is None:
        objective_id = args.objective_id_
    time_series_args = set_basic_model_args(args, name)
    time_series_args.update({
        "all_numeric_objectives": args.all_numeric_objectives,
        "period": args.period
    })
    # if we need to evaluate and there's no previous split, use a range
    if args.evaluate and args.test_split == 0 and not args.has_test_datasets_:
        args.range_ = [1, int(args.max_rows * EVALUATE_SAMPLE_RATE)]
    if objective_id is not None:
        time_series_args.update({"objective_field": objective_id})
    if args.objectives:
        time_series_args.update({"objective_fields": args.objective_fields_})
    if args.damped_trend is not None:
        time_series_args.update({"damped_trend": args.damped_trend})
    if args.error is not None:
        time_series_args.update({"error": args.error})
    if args.field_parameters:
        time_series_args.update({"field_parameters": args.field_parameters_})
    if args.range_:
        time_series_args.update({"range": args.range_})
    if args.seasonality is not None:
        time_series_args.update({"seasonality": args.seasonality})
    if args.trend is not None:
        time_series_args.update({"trend": args.trend})
    if args.time_start or args.time_end or args.time_interval or \
            args.time_interval_unit:
        time_range = {}
        if args.time_start:
            time_range.update({"start": args.time_start})
        if args.time_end:
            time_range.update({"end": args.time_end})
        if args.time_interval:
            time_range.update({"interval": args.time_interval})
        if args.time_interval_unit:
            time_range.update({"interval_unit": args.time_interval_unit})
        # Bug fix: this previously called `time_series.update(...)` on an
        # undefined name, raising NameError whenever any time-range
        # option was set.
        time_series_args.update({"time_range": time_range})
    if 'time_series' in args.json_args:
        update_json_args(time_series_args,
                         args.json_args.get('time_series'),
                         fields)
    return time_series_args
def create_time_series(datasets, time_series_ids,
                       time_series_args,
                       args, api=None, path=None,
                       session_file=None, log=None):
    """Create remote time-series.

    Resumes from the already-created resources in ``time_series_ids``
    and creates one time-series (only one per command, at present) from
    the remaining datasets. Returns (time_series_set, time_series_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    time_series_set = time_series_ids[:]
    existing_time_series = len(time_series_set)
    time_series_args_list = []
    datasets = datasets[existing_time_series:]
    # if resuming and all time-series were created,
    # there will be no datasets left
    if datasets:
        if isinstance(time_series_args, list):
            time_series_args_list = time_series_args

        # Only one time-series per command, at present
        number_of_time_series = 1
        message = dated("Creating %s time-series.\n" %
                        number_of_time_series)
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_time_series):
            # throttle creation to the maximum number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_time_series,
                                     api, "timeseries")
            if time_series_args_list:
                time_series_args = time_series_args_list[i]

            if (args.test_datasets and args.evaluate):
                dataset = datasets[i]
                time_series = api.create_time_series( \
                    dataset, time_series_args, retries=None)
            else:
                time_series = api.create_time_series( \
                    datasets, time_series_args, retries=None)
            time_series_id = check_resource_error( \
                time_series, "Failed to create time-series: ")
            log_message("%s\n" % time_series_id, log_file=log)
            time_series_ids.append(time_series_id)
            inprogress.append(time_series_id)
            time_series_set.append(time_series)
            log_created_resources("time_series",
                                  path,
                                  time_series_id, mode='a')

            if args.verbosity:
                if bigml.api.get_status(time_series)['code'] != \
                    bigml.api.FINISHED:
                    try:
                        time_series = check_resource( \
                            time_series, api.get_time_series,
                            query_string=query_string)
                    # Py2/3-compatible syntax (was `except ValueError, exception`)
                    except ValueError as exception:
                        sys.exit("Failed to get a finished time-series:"
                                 " %s" %
                                 str(exception))
                    time_series_set[0] = time_series
                message = dated("Time-series created: %s\n" %
                                get_url(time_series))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, time_series)

    return time_series_set, time_series_ids
def get_time_series(time_series_ids,
                    args, api=None, session_file=None):
    """Retrieve the remote time-series in their actual status.

    Only the first id is fetched (one time-series per command, at
    present). Returns (time_series_set, time_series_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    time_series_set = time_series_ids
    time_series_id = time_series_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("time-series", len(time_series_ids)),
                     get_url(time_series_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one time-series to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        time_series = check_resource(time_series_ids[0],
                                     api.get_time_series,
                                     query_string=query_string)
    # Py2/3-compatible syntax (was `except ValueError, exception`)
    except ValueError as exception:
        sys.exit("Failed to get a finished time-series: %s" % \
            str(exception))
    time_series_set[0] = time_series
    return time_series_set, time_series_ids
def set_publish_time_series_args(args):
    """Build the arguments dict used to publish a time-series.

    Returns an empty dict when no publishing option was requested.
    """
    publish_args = {"private": False} if args.public_time_series else {}
    if args.model_price:
        publish_args["price"] = args.model_price
    if args.cpp:
        publish_args["credits_per_prediction"] = args.cpp
    return publish_args
def update_time_series(time_series, time_series_args,
                       args, api=None, path=None, session_file=None):
    """Update a remote time-series' properties and wait until the
    updated resource is finished.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating time-series. %s\n" % get_url(time_series)),
                log_file=session_file, console=args.verbosity)
    time_series = api.update_time_series(time_series, time_series_args)
    check_resource_error(time_series,
                         "Failed to update time-series: %s"
                         % time_series['resource'])
    time_series = check_resource(time_series, api.get_time_series,
                                 query_string=FIELDS_QS)
    if is_shared(time_series):
        log_message(dated("Shared time-series link. %s\n" %
                          get_url(time_series, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, time_series)
    return time_series
def set_library_args(args, name=None):
    """Build the creation arguments dict for a whizzml library."""
    library_args = set_basic_args(args, args.name if name is None else name)
    if args.project_id is not None:
        library_args["project"] = args.project_id
    if args.imports is not None:
        library_args["imports"] = args.imports_
    update_attributes(library_args, args.json_args.get('library'))
    return library_args
def create_library(source_code, library_args, args, api=None, path=None,
                   session_file=None, log=None):
    """Create a remote whizzml library from its source code and wait
    until it is compiled. Returns the finished library resource.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating library \"%s\".\n" % library_args["name"])
    log_message(message, log_file=session_file, console=args.verbosity)
    library = api.create_library(source_code, library_args)
    log_created_resources("library", path,
                          bigml.api.get_library_id(library), mode='a')
    library_id = check_resource_error(library, "Failed to create library: ")
    try:
        library = check_resource(library, api.get_library)
    # Py2/3-compatible syntax (was `except ValueError, exception`)
    except ValueError as exception:
        sys.exit("Failed to get a compiled library: %s" % str(exception))
    message = dated("Library created: %s\n" % get_url(library))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % library_id, log_file=log)
    return library
def update_sample_parameters_args(resource_args, args):
    """Add the common sampling options to a resource arguments dict.

    Only non-default values are sent: ``sample_rate`` when != 1, and
    each boolean flag only when present on ``args`` and truthy.
    Returns the (mutated) ``resource_args`` dict.
    """
    if args.sample_rate != 1:
        resource_args["sample_rate"] = args.sample_rate
    for flag in ("out_of_bag", "replacement", "randomize"):
        if getattr(args, flag, False):
            resource_args[flag] = True
    return resource_args
def set_topic_model_args(args, name=None, fields=None,
                         topic_model_fields=None):
    """Build the creation arguments dict for a topic model."""
    name = args.name if name is None else name
    if topic_model_fields is None:
        topic_model_fields = args.topic_model_fields_

    topic_model_args = set_basic_args(args, name)
    seed = SEED if args.seed is None else args.seed
    topic_model_args["seed"] = seed
    topic_model_args["topicmodel_seed"] = seed

    if topic_model_fields and fields is not None:
        topic_model_args["input_fields"] = configure_input_fields(
            fields, topic_model_fields)
    topic_model_args["sample_rate"] = args.sample_rate
    topic_model_args["bigrams"] = args.bigrams
    topic_model_args["case_sensitive"] = args.case_sensitive
    # optional tuning parameters: only sent when explicitly set
    for key, value in (
            ("number_of_topics", args.number_of_topics),
            ("term_limit", args.term_limit),
            ("top_n_terms", args.top_n_terms),
            ("minimum_name_terms", args.minimum_name_terms)):
        if value is not None:
            topic_model_args[key] = value
    if args.excluded_terms:
        topic_model_args["excluded_terms"] = args.excluded_terms_

    topic_model_args = update_sample_parameters_args(
        topic_model_args, args)
    if 'topic_model' in args.json_args:
        update_json_args(topic_model_args,
                         args.json_args.get('topic_model'),
                         fields)
    return topic_model_args
def create_topic_models(datasets, topic_model_ids, topic_model_args,
                        args, api=None, path=None,
                        session_file=None, log=None):
    """Create remote topic models.

    Resumes from the already-created resources in ``topic_model_ids``
    and creates one topic model (only one per command, at present) from
    the remaining datasets. Returns (topic_models, topic_model_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    topic_models = topic_model_ids[:]
    existing_topic_models = len(topic_models)
    topic_model_args_list = []
    datasets = datasets[existing_topic_models:]
    # if resuming and all topic models were created, there will
    # be no datasets left
    if datasets:
        if isinstance(topic_model_args, list):
            topic_model_args_list = topic_model_args

        # Only one topic model per command, at present
        number_of_topic_models = 1
        message = dated("Creating %s.\n" %
                        plural("topic model", number_of_topic_models))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_topic_models):
            # throttle creation to the maximum number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_topic_models,
                                     api, "topicmodel")
            if topic_model_args_list:
                topic_model_args = topic_model_args_list[i]

            topic_model = api.create_topic_model(datasets,
                                                 topic_model_args,
                                                 retries=None)
            topic_model_id = check_resource_error( \
                topic_model,
                "Failed to create topic model: ")
            log_message("%s\n" % topic_model_id, log_file=log)
            topic_model_ids.append(topic_model_id)
            inprogress.append(topic_model_id)
            topic_models.append(topic_model)
            log_created_resources("topic_models", path, topic_model_id,
                                  mode='a')

            if args.verbosity:
                if bigml.api.get_status(topic_model)['code'] != bigml.api.FINISHED:
                    try:
                        topic_model = check_resource( \
                            topic_model, api.get_topic_model,
                            query_string=query_string)
                    # Py2/3-compatible syntax (was `except ValueError, exception`)
                    except ValueError as exception:
                        sys.exit("Failed to get a finished topic model: %s" %
                                 str(exception))
                    topic_models[0] = topic_model
                message = dated("Topic model created: %s\n" %
                                get_url(topic_model))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, topic_model)

    return topic_models, topic_model_ids
def get_topic_models(topic_model_ids,
                     args, api=None, session_file=None):
    """Retrieve the remote topic models in their actual status.

    Only the first id is fetched (one topic model per command, at
    present). Returns (topic_models, topic_model_ids).
    """
    if api is None:
        api = bigml.api.BigML()
    topic_models = topic_model_ids
    topic_model_id = topic_model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("topic model", len(topic_model_ids)),
                     get_url(topic_model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one topic_model at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        topic_model = check_resource(topic_model_ids[0],
                                     api.get_topic_model,
                                     query_string=query_string)
    # Py2/3-compatible syntax (was `except ValueError, exception`)
    except ValueError as exception:
        sys.exit("Failed to get a finished topic model: %s" % \
            str(exception))
    topic_models[0] = topic_model
    return topic_models, topic_model_ids
def set_publish_topic_model_args(args):
    """Build the arguments dict used to publish a topic model.

    Returns an empty dict when no publishing option was requested.
    """
    publish_args = {"private": False} if args.public_topic_model else {}
    if args.model_price:
        publish_args["price"] = args.model_price
    if args.cpp:
        publish_args["credits_per_prediction"] = args.cpp
    return publish_args
def update_topic_model(topic_model, topic_model_args,
                       args, api=None, path=None, session_file=None):
    """Update a remote topic model's properties and wait until the
    updated resource is finished.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating topic model. %s\n" % get_url(topic_model)),
                log_file=session_file, console=args.verbosity)
    topic_model = api.update_topic_model(topic_model, topic_model_args)
    check_resource_error(topic_model,
                         "Failed to update topic model: %s"
                         % topic_model['resource'])
    topic_model = check_resource(topic_model, api.get_topic_model,
                                 query_string=FIELDS_QS)
    if is_shared(topic_model):
        log_message(dated("Shared topic model link. %s\n" %
                          get_url(topic_model, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, topic_model)
    return topic_model
def set_batch_topic_distribution_args( \
        args, fields=None, dataset_fields=None):
    """Return batch topic distribution args dict.

    ``fields`` are the topic model fields; ``dataset_fields`` defaults
    to ``fields`` when not given and is used to resolve the names in
    ``--prediction-fields``.
    """
    batch_topic_distribution_args = set_basic_batch_args(args, args.name)
    # Fall back to the model fields. Previously this fallback only
    # happened inside the fields_map branch, so using
    # --prediction-fields without --fields-map crashed on a None
    # dataset_fields below.
    if dataset_fields is None:
        dataset_fields = fields
    if args.fields_map_ and fields is not None:
        batch_topic_distribution_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})
    if args.prediction_info == FULL_FORMAT:
        batch_topic_distribution_args.update(all_fields=True)
    if args.prediction_fields:
        batch_topic_distribution_args.update(all_fields=False)
        prediction_fields = []
        for field in args.prediction_fields.split(args.args_separator):
            field = field.strip()
            if field not in dataset_fields.fields:
                try:
                    field = dataset_fields.field_id(field)
                # Py2/3-compatible syntax (was `except ValueError, exc`)
                except ValueError as exc:
                    sys.exit(exc)
            prediction_fields.append(field)
        batch_topic_distribution_args.update(output_fields=prediction_fields)
    if 'batch_topic_distribution' in args.json_args:
        update_json_args(
            batch_topic_distribution_args, args.json_args.get( \
                'batch_topic_distribution'), fields)
    return batch_topic_distribution_args
def create_batch_topic_distribution(topic_model, test_dataset,
                                    batch_topic_distribution_args, args,
                                    api=None, session_file=None,
                                    path=None, log=None):
    """Create a remote batch topic distribution and wait until it is
    finished. Returns the finished resource.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch topic distribution.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_topic_distribution = api.create_batch_topic_distribution( \
        topic_model, test_dataset, batch_topic_distribution_args, retries=None)
    log_created_resources( \
        "batch_topic_distribution", path,
        bigml.api.get_batch_topic_distribution_id(batch_topic_distribution),
        mode='a')
    batch_topic_distribution_id = check_resource_error(
        batch_topic_distribution,
        "Failed to create batch topic distribution: ")
    try:
        batch_topic_distribution = check_resource( \
            batch_topic_distribution, api.get_batch_topic_distribution)
    # Py2/3-compatible syntax (was `except ValueError, exception`)
    except ValueError as exception:
        sys.exit("Failed to get a finished batch topic distribution: %s"
                 % str(exception))
    message = dated("Batch topic distribution created: %s\n"
                    % get_url(batch_topic_distribution))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_topic_distribution_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_topic_distribution)
    return batch_topic_distribution
def set_forecast_args(args, fields=None):
    """Build the creation arguments dict for a remote forecast."""
    forecast_args = set_basic_args(args, args.name)
    forecast_args["intervals"] = args.intervals
    if 'forecast' in args.json_args:
        update_json_args(forecast_args,
                         args.json_args.get('forecast'),
                         fields)
    return forecast_args
def create_forecast(time_series, input_data, forecast_args, args,
                    api=None, session_file=None,
                    path=None, log=None):
    """Create a remote forecast from a time-series and wait until it is
    finished. Returns the finished forecast resource.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating remote forecast.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    forecast = api.create_forecast(time_series, input_data,
                                   forecast_args,
                                   retries=None)
    log_created_resources("forecast", path,
                          bigml.api.get_forecast_id(forecast),
                          mode='a')
    forecast_id = check_resource_error(
        forecast, "Failed to create forecast: ")
    try:
        forecast = check_resource(forecast, api.get_forecast)
    # Py2/3-compatible syntax (was `except ValueError, exception`)
    except ValueError as exception:
        sys.exit("Failed to get a finished forecast: %s"
                 % str(exception))
    message = dated("Forecast created: %s\n"
                    % get_url(forecast))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % forecast_id, log_file=log)
    if args.reports:
        report(args.reports, path, forecast)
    return forecast
def set_deepnet_args(args, name=None, fields=None,
                     objective_id=None,
                     deepnet_fields=None):
    """Return deepnet arguments dict.

    Builds the creation arguments for a remote deepnet from the user
    options. Note: may mutate ``args.sample_rate`` /
    ``args.replacement`` when cross-validation or evaluation sampling
    is required.
    """
    if name is None:
        name = args.name
    if deepnet_fields is None:
        deepnet_fields = args.deepnet_fields_
    if objective_id is None:
        objective_id = args.objective_id_
    deepnet_args = set_basic_model_args(args, name)
    deepnet_args.update({
        "seed": SEED if args.seed is None else args.seed
    })

    if objective_id is not None and fields is not None:
        deepnet_args.update({"objective_field": objective_id})
    if deepnet_fields and fields is not None:
        input_fields = configure_input_fields(fields, deepnet_fields)
        deepnet_args.update(input_fields=input_fields)
    if ((args.evaluate and args.test_split == 0 and args.test_datasets is None)
            or args.cross_validation_rate > 0):
        deepnet_args.update(seed=SEED)
        if args.cross_validation_rate > 0:
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
              and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE
    deepnet_args.update({"sample_rate": args.sample_rate})
    if args.batch_normalization is not None:
        deepnet_args.update({"batch_normalization": args.batch_normalization})
    if args.dropout_rate:
        deepnet_args.update({"dropout_rate": args.dropout_rate})
    if args.hidden_layers is not None:
        deepnet_args.update({"hidden_layers": args.hidden_layers_})
    if args.learn_residuals is not None:
        deepnet_args.update( \
            {"learn_residuals": args.learn_residuals})
    # Bug fix: the guard here used to test ``args.max_iterations`` while
    # setting ``learning_rate``, so ``max_iterations`` was never sent and
    # ``learning_rate`` was sent under the wrong condition.
    if args.learning_rate is not None:
        deepnet_args.update(\
            {"learning_rate": args.learning_rate})
    if args.max_iterations is not None:
        deepnet_args.update(\
            {"max_iterations": args.max_iterations})
    if args.max_training_time is not None:
        deepnet_args.update(\
            {"max_training_time": args.max_training_time})
    if args.number_of_hidden_layers is not None:
        deepnet_args.update(\
            {"number_of_hidden_layers": args.number_of_hidden_layers})
    if args.number_of_model_candidates is not None:
        deepnet_args.update(\
            {"number_of_model_candidates": args.number_of_model_candidates})
    if args.search is not None:
        deepnet_args.update(\
            {"search": args.search})
    if args.suggest_structure is not None:
        deepnet_args.update(\
            {"suggest_structure": args.suggest_structure})
    if not args.missing_numerics:
        deepnet_args.update(\
            {"missing_numerics": args.missing_numerics})
    if args.tree_embedding:
        deepnet_args.update(\
            {"tree_embedding": args.tree_embedding})

    deepnet_args = update_sample_parameters_args( \
        deepnet_args, args)
    if 'deepnet' in args.json_args:
        update_json_args(deepnet_args,
                         args.json_args.get('deepnet'),
                         fields)
    return deepnet_args
def create_deepnets(datasets, deepnet_ids,
                    deepnet_args,
                    args, api=None, path=None,
                    session_file=None, log=None):
    """Create remote deepnets from the given datasets.

    Supports resuming: `deepnet_ids` holds the ids of deepnets already
    created in a previous run, and only the datasets without a deepnet
    yet are processed.

    :param datasets: origin dataset ids/structures
    :param deepnet_ids: ids of deepnets already created (resume info)
    :param deepnet_args: creation args dict, or a list of dicts
                         (one per deepnet to create)
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param path: directory where created resource ids are logged
    :param session_file: file to log session messages to
    :param log: file to log created resource ids to
    :return: tuple (list of deepnet structures, list of deepnet ids)
    """
    if api is None:
        api = bigml.api.BigML()
    deepnets = deepnet_ids[:]
    existing_deepnets = len(deepnets)
    deepnet_args_list = []
    # skip the datasets whose deepnet was created in a previous run
    datasets = datasets[existing_deepnets:]
    # if resuming and all deepnets were created,
    # there will be no datasets left
    if datasets:
        if isinstance(deepnet_args, list):
            deepnet_args_list = deepnet_args
        # Only one deepnet per command, at present
        number_of_deepnets = 1
        message = dated("Creating %s.\n" %
                        plural("deepnet",
                               number_of_deepnets))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_deepnets):
            # throttle creation to the allowed number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_deepnets,
                                     api, "deepnet")
            if deepnet_args_list:
                deepnet_args = deepnet_args_list[i]
            if args.cross_validation_rate > 0:
                # deterministic per-deepnet seed for cross-validation runs
                new_seed = get_basic_seed(i + existing_deepnets)
                deepnet_args.update(seed=new_seed)

            if (args.test_datasets and args.evaluate):
                # evaluation with explicit test datasets: one origin dataset
                dataset = datasets[i]
                deepnet = api.create_deepnet( \
                    dataset, deepnet_args, retries=None)
            elif args.dataset_off and args.evaluate:
                # dataset-off evaluation: train on all datasets but the
                # one held out for testing
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_deepnets]
                deepnet = api.create_deepnet( \
                    multi_dataset, deepnet_args, retries=None)
            else:
                deepnet = api.create_deepnet( \
                    datasets, deepnet_args, retries=None)
            deepnet_id = check_resource_error( \
                deepnet, "Failed to create deepnet: ")
            log_message("%s\n" % deepnet_id, log_file=log)
            deepnet_ids.append(deepnet_id)
            inprogress.append(deepnet_id)
            deepnets.append(deepnet)
            log_created_resources("deepnets",
                                  path,
                                  deepnet_id, mode='a')

        if args.verbosity:
            # wait for the last created deepnet to finish before reporting
            if bigml.api.get_status(deepnet)['code'] != \
                    bigml.api.FINISHED:
                try:
                    deepnet = check_resource( \
                        deepnet, api.get_deepnet,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished deepnet:"
                             " %s" %
                             str(exception))
                deepnets[0] = deepnet
            message = dated("Deepnet created: %s\n" %
                            get_url(deepnet))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, deepnet)
    return deepnets, deepnet_ids
def get_deepnets(deepnet_ids, args, api=None, session_file=None):
    """Retrieve the first remote deepnet in its current (finished) status.

    Only the first id in `deepnet_ids` is fetched; the returned list is
    the input list with its first element replaced by the full resource.

    :param deepnet_ids: list of deepnet ids (only the first is retrieved)
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param session_file: file to log session messages to
    :return: tuple (deepnets list, deepnet_ids list)
    """
    if api is None:
        api = bigml.api.BigML()
    deepnet_id = ""
    deepnets = deepnet_ids
    deepnet_id = deepnet_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("deepnet", len(deepnet_ids)),
                     get_url(deepnet_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one deepnet to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        deepnet = check_resource(deepnet_ids[0],
                                 api.get_deepnet,
                                 query_string=query_string)
    except ValueError, exception:
        sys.exit("Failed to get a finished deepnet: %s" % \
                 str(exception))
    deepnets[0] = deepnet
    return deepnets, deepnet_ids
def update_deepnets(deepnet, deepnet_args,
                    args, api=None, path=None, session_file=None):
    """Update the properties of an existing deepnet.

    Logs progress, waits for the update to finish and, when the deepnet
    is shared, logs the shared link (plus a report, if requested).

    :param deepnet: deepnet resource or id to update
    :param deepnet_args: dict of attributes to update
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param path: directory used for reports
    :param session_file: file to log session messages to
    :return: the updated, finished deepnet resource
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating deepnet. %s\n" % get_url(deepnet)),
                log_file=session_file, console=args.verbosity)
    deepnet = api.update_deepnet(deepnet, deepnet_args)
    error_message = "Failed to update deepnet: %s" % deepnet['resource']
    check_resource_error(deepnet, error_message)
    # wait until the update has finished before returning
    deepnet = check_resource(deepnet, api.get_deepnet,
                             query_string=FIELDS_QS)
    if is_shared(deepnet):
        shared_message = dated("Shared deepnet link. %s\n" %
                               get_url(deepnet, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, deepnet)
    return deepnet
def set_pca_args(args, name=None, fields=None,
                 pca_fields=None):
    """Build the arguments dict used to create a PCA resource.

    Defaults `name` and `pca_fields` from `args`, then merges basic
    attributes, seeds, sampling options and user-given JSON attributes.

    :param args: command-line options object
    :param name: resource name (defaults to args.name)
    :param fields: Fields object of the origin dataset
    :param pca_fields: user-given input field specs
        (defaults to args.pca_fields_)
    :return: dict of PCA creation arguments
    """
    name = args.name if name is None else name
    pca_fields = args.pca_fields_ if pca_fields is None else pca_fields
    pca_args = set_basic_args(args, name)
    # both seed attributes share the same user-given (or default) value
    chosen_seed = SEED if args.seed is None else args.seed
    pca_args.update({"seed": chosen_seed, "pca_seed": chosen_seed})
    pca_args.update({"sample_rate": args.sample_rate})
    pca_args = update_sample_parameters_args(pca_args, args)
    if fields is not None:
        input_fields = fields.fields.keys()
    if pca_fields and fields is not None:
        input_fields = configure_input_fields(fields, pca_fields)
    if args.exclude_objective:
        # NOTE(review): if fields is None here, input_fields is unbound —
        # presumably callers always pass fields with exclude_objective;
        # confirm against callers
        input_fields = [field_id for field_id in input_fields
                        if field_id not in args.exclude_fields]
        pca_args.update(input_fields=input_fields)
    if 'pca' in args.json_args:
        update_json_args(pca_args,
                         args.json_args.get('pca'),
                         fields)
    return pca_args
def create_pca(datasets, pca, pca_args,
               args, api=None, path=None,
               session_file=None, log=None):
    """Create a remote PCA from the given datasets.

    Supports resuming: when `pca` is not None it is counted as already
    created and the corresponding dataset is skipped.

    :param datasets: origin dataset ids/structures
    :param pca: previously created PCA (resume info) or None
    :param pca_args: creation args dict, or a list of dicts
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param path: directory where created resource ids are logged
    :param session_file: file to log session messages to
    :param log: file to log created resource ids to
    :return: the created (or resumed) PCA resource
    """
    if api is None:
        api = bigml.api.BigML()
    pcas = []
    pca_ids = []
    if pca is not None:
        pcas = [pca]
        pca_ids = [pca]
    existing_pcas = len(pcas)
    pca_args_list = []
    # skip the datasets whose pca was created in a previous run
    datasets = datasets[existing_pcas:]
    # if resuming and all pcas were created, there will
    # be no datasets left
    if datasets:
        if isinstance(pca_args, list):
            pca_args_list = pca_args
        # Only one pca per command, at present
        number_of_pcas = 1
        message = dated("Creating %s.\n" %
                        plural("pca", number_of_pcas))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_pcas):
            # throttle creation to the allowed number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_pcas,
                                     api, "pca")
            if pca_args_list:
                pca_args = pca_args_list[i]
            pca = api.create_pca(datasets,
                                 pca_args,
                                 retries=None)
            pca_id = check_resource_error( \
                pca,
                "Failed to create pca: ")
            log_message("%s\n" % pca_id, log_file=log)
            pca_ids.append(pca_id)
            inprogress.append(pca_id)
            pcas.append(pca)
            log_created_resources("pcas", path, pca_id,
                                  mode='a')
        if args.verbosity:
            # wait for the created pca to finish before reporting
            if bigml.api.get_status(pca)['code'] != bigml.api.FINISHED:
                try:
                    pca = check_resource( \
                        pca, api.get_pca,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished pca: %s" %
                             str(exception))
                pcas[0] = pca
            message = dated("PCA created: %s\n" %
                            get_url(pca))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, pca)
    return pca
def get_pca(pca,
            args, api=None, session_file=None):
    """Retrieve a remote PCA in its current (finished) status.

    :param pca: PCA resource or id to retrieve
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param session_file: file to log session messages to
    :return: the finished PCA resource
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Retrieving PCA. %s\n" %
                    get_url(pca))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one PCA at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        pca = check_resource(pca,
                             api.get_pca,
                             query_string=query_string)
    except ValueError, exception:
        sys.exit("Failed to get a finished pca: %s" % \
                 str(exception))
    return pca
def set_publish_pca_args(args):
    """Build the update payload used to publish a PCA.

    Flips the resource to public when requested, and attaches the model
    price and the credits charged per prediction when given.

    :param args: command-line options object
    :return: dict of publishing attributes (may be empty)
    """
    publish_args = {"private": False} if args.public_pca else {}
    if args.model_price:
        publish_args["price"] = args.model_price
    if args.cpp:
        publish_args["credits_per_prediction"] = args.cpp
    return publish_args
def update_pca(pca, pca_args,
               args, api=None, path=None, session_file=None):
    """Update the properties of an existing PCA.

    Logs progress, waits for the update to finish and, when the PCA is
    shared, logs the shared link (plus a report, if requested).

    :param pca: PCA resource or id to update
    :param pca_args: dict of attributes to update
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param path: directory used for reports
    :param session_file: file to log session messages to
    :return: the updated, finished PCA resource
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating PCA. %s\n" % get_url(pca)),
                log_file=session_file, console=args.verbosity)
    pca = api.update_pca(pca, pca_args)
    error_message = "Failed to update PCA: %s" % pca['resource']
    check_resource_error(pca, error_message)
    # wait until the update has finished before returning
    pca = check_resource(pca, api.get_pca,
                         query_string=FIELDS_QS)
    if is_shared(pca):
        shared_message = dated("Shared PCA link. %s\n" %
                               get_url(pca, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, pca)
    return pca
def set_batch_projection_args( \
        args, fields=None, dataset_fields=None):
    """Build the arguments dict used to create a batch projection.

    Adds a fields map between PCA and test-dataset fields when given,
    and restricts the output fields when --projection-fields is used.

    :param args: command-line options object
    :param fields: Fields object of the PCA
    :param dataset_fields: Fields object of the test dataset
        (defaults to `fields`)
    :return: dict of batch projection creation arguments
    """
    batch_projection_args = set_basic_batch_args(args, args.name)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_projection_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})
    batch_projection_args.update(all_fields=False)
    if args.projection_fields:
        batch_projection_args.update(all_fields=True)
        projection_fields = []
        if args.projection_fields != "all":
            # NOTE(review): this repeats all_fields=True; the analogous
            # batch prediction code sets all_fields=False when an explicit
            # field list is given — confirm which is intended
            batch_projection_args.update(all_fields=True)
            for field in args.projection_fields.split(args.args_separator):
                field = field.strip()
                # accept field names as well as field ids
                if not field in dataset_fields.fields:
                    try:
                        field = dataset_fields.field_id(field)
                    except ValueError, exc:
                        sys.exit(exc)
                projection_fields.append(field)
            batch_projection_args.update(output_fields=projection_fields)
    if 'batch_projection' in args.json_args:
        update_json_args(
            batch_projection_args, args.json_args.get( \
                'batch_projection'), fields)
    return batch_projection_args
def create_batch_projection(pca, test_dataset,
                            batch_projection_args, args,
                            api=None, session_file=None,
                            path=None, log=None):
    """Create a remote batch projection from a PCA and a test dataset.

    Waits for the batch projection to finish and logs its id and URL.

    :param pca: PCA resource or id used to project
    :param test_dataset: dataset resource or id to be projected
    :param batch_projection_args: dict of creation arguments
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param session_file: file to log session messages to
    :param path: directory where created resource ids are logged
    :param log: file to log created resource ids to
    :return: the finished batch projection resource
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch projection.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_projection = api.create_batch_projection( \
        pca, test_dataset, batch_projection_args, retries=None)
    log_created_resources( \
        "batch_projection", path,
        bigml.api.get_batch_projection_id(batch_projection), mode='a')
    batch_projection_id = check_resource_error(
        batch_projection,
        "Failed to create batch projection: ")
    try:
        # wait until the batch projection is finished
        batch_projection = check_resource( \
            batch_projection, api.get_batch_projection)
    except ValueError, exception:
        sys.exit("Failed to get a finished batch projection: %s"
                 % str(exception))
    message = dated("Batch projection created: %s\n"
                    % get_url(batch_projection))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_projection_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_projection)
    return batch_projection
def set_fusion_args(args, name=None, fields=None):
    """Build the arguments dict used to create a fusion resource.

    Starts from the common basic attributes (name, description, category,
    tags) and merges any user-given JSON attributes for fusions.

    :param args: command-line options object
    :param name: resource name (defaults to args.name)
    :param fields: Fields object used to resolve JSON field keys
    :return: dict of fusion creation arguments
    """
    fusion_args = set_basic_args(args, args.name if name is None else name)
    if 'fusion' in args.json_args:
        update_json_args(fusion_args, args.json_args.get('fusion'), fields)
    return fusion_args
def create_fusion(models, fusion, fusion_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Create a remote fusion from the given component models.

    Supports resuming: when `fusion` is not None it is counted as already
    created and no new fusion is requested.

    :param models: list of component model ids/structures
    :param fusion: previously created fusion (resume info) or None
    :param fusion_args: dict of creation arguments
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param path: directory where created resource ids are logged
    :param session_file: file to log session messages to
    :param log: file to log created resource ids to
    :return: the created (or resumed) fusion resource
    """
    if api is None:
        api = bigml.api.BigML()
    fusions = []
    fusion_ids = []
    if fusion is not None:
        fusions = [fusion]
        fusion_ids = [fusion]
    existing_fusions = len(fusions)
    # if resuming and all fusions were created
    if models:
        # Only one fusion per command, at present
        number_of_fusions = 1
        message = dated("Creating %s.\n" %
                        plural("fusion", number_of_fusions))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_fusions):
            # throttle creation to the allowed number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_fusions,
                                     api, "fusion")
            fusion = api.create_fusion(models,
                                       fusion_args,
                                       retries=None)
            fusion_id = check_resource_error( \
                fusion,
                "Failed to create fusion: ")
            log_message("%s\n" % fusion_id, log_file=log)
            fusion_ids.append(fusion_id)
            inprogress.append(fusion_id)
            fusions.append(fusion)
            log_created_resources("fusions", path, fusion_id,
                                  mode='a')
        if args.verbosity:
            # wait for the created fusion to finish before reporting
            if bigml.api.get_status(fusion)['code'] != bigml.api.FINISHED:
                try:
                    fusion = check_resource( \
                        fusion, api.get_fusion,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished fusion: %s" %
                             str(exception))
                fusions[0] = fusion
            message = dated("Fusion created: %s\n" %
                            get_url(fusion))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, fusion)
    return fusion
def get_fusion(fusion,
               args, api=None, session_file=None):
    """Retrieve a remote fusion in its current (finished) status.

    :param fusion: fusion resource or id to retrieve
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param session_file: file to log session messages to
    :return: the finished fusion resource
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Retrieving Fusion. %s\n" %
                    get_url(fusion))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one fusion at present
    try:
        # we need the whole fields structure when exporting fields
        fusion = check_resource(fusion,
                                api.get_fusion,
                                query_string=ALL_FIELDS_QS)
    except ValueError, exception:
        sys.exit("Failed to get a finished fusion: %s" % \
                 str(exception))
    return fusion
def set_publish_fusion_args(args):
    """Build the update payload used to publish a fusion.

    Flips the resource to public when requested, and attaches the model
    price and the credits charged per prediction when given.

    :param args: command-line options object
    :return: dict of publishing attributes (may be empty)
    """
    publish_args = {"private": False} if args.public_fusion else {}
    if args.model_price:
        publish_args["price"] = args.model_price
    if args.cpp:
        publish_args["credits_per_prediction"] = args.cpp
    return publish_args
def update_fusion(fusion, fusion_args, args,
                  api=None, path=None, session_file=None):
    """Update the properties of an existing fusion.

    Logs progress, waits for the update to finish and, when the fusion
    is shared, logs the shared link (plus a report, if requested).

    :param fusion: fusion resource or id to update
    :param fusion_args: dict of attributes to update
    :param args: command-line options object
    :param api: BigML API connection (a default one is created if None)
    :param path: directory used for reports
    :param session_file: file to log session messages to
    :return: the updated, finished fusion resource
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating Fusion. %s\n" % get_url(fusion)),
                log_file=session_file, console=args.verbosity)
    fusion = api.update_fusion(fusion, fusion_args)
    error_message = "Failed to update Fusion: %s" % fusion['resource']
    check_resource_error(fusion, error_message)
    # wait until the update has finished before returning
    fusion = check_resource(fusion, api.get_fusion,
                            query_string=FIELDS_QS)
    if is_shared(fusion):
        shared_message = dated("Shared Fusion link. %s\n" %
                               get_url(fusion, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, fusion)
    return fusion
| 38.381466 | 81 | 0.616149 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources management functions
"""
from __future__ import absolute_import
import sys
import time
try:
import simplejson as json
except ImportError:
import json
import bigml.api
from bigml.util import bigml_locale
from bigml.multivote import THRESHOLD_CODE
from bigmler.utils import (dated, get_url, log_message, plural, check_resource,
check_resource_error, log_created_resources,
decode2, transform_fields_keys,
is_shared, FILE_ENCODING, PYTHON3)
from bigmler.labels import label_model_args, get_all_labels
from bigmler.reports import report
EVALUATE_SAMPLE_RATE = 0.8
SEED = "BigML, Machine Learning made easy"
LOCALE_DEFAULT = "en_US"
FIELDS_QS = 'only_model=true'
ALL_FIELDS_QS = "limit=-1"
ADD_PREFIX = '+'
REMOVE_PREFIX = '-'
ADD_REMOVE_PREFIX = [ADD_PREFIX, REMOVE_PREFIX]
BRIEF_FORMAT = 'brief'
NORMAL_FORMAT = 'normal'
FULL_FORMAT = 'full'
VALID_FIELD_ATTRIBUTES = {
"source": ["name", "label", "description", "optype", "term_analysis"],
"dataset": ["name", "label", "description", "preferred", "term_analysis"]}
BOOSTING_OPTIONS = ["iterations", "early_holdout", "learning_rate", \
"early_out_of_bag", "step_out_of_bag"]
DS_NAMES = "ABCDEFGHIJKLMNOPQRSTUVXYZ"
def get_basic_seed(order):
""" Builds a standard seed from a text adding the order
"""
return "%s - %s" % (SEED, order)
def shared_changed(shared, resource):
"""Returns True if the shared status of the resource differs from the user
given value
"""
return is_shared(resource) != shared
def configure_input_fields(fields, user_given_fields, by_name=False):
""" Returns the input fields used in the new resource creation as given
The user can choose to write all the fields that will be used in the
new resource or modify the set of fields retrieved from the
resource that will be used to create the new one.
"""
def modify_input_fields(prefix, field, input_fields):
"""Adds or removes according to the prefix in the given field
this field from the list of input fields.
"""
if prefix == ADD_PREFIX:
if not field in input_fields:
input_fields.append(field)
elif field in input_fields:
input_fields.remove(field)
# case of adding and removing fields to the dataset preferred field set
if all([name[0] in ADD_REMOVE_PREFIX for name in user_given_fields]):
preferred_fields = fields.preferred_fields()
input_fields = preferred_fields.keys()
if by_name:
input_fields = [fields.field_name(field_id) for field_id in
input_fields]
for name in user_given_fields:
prefix = name[0]
field_name = name[1:]
if by_name:
modify_input_fields(prefix, field_name, input_fields)
else:
try:
field_id = fields.field_id(field_name)
except ValueError, exc:
sys.exit(exc)
modify_input_fields(prefix, field_id, input_fields)
# case of user given entire list of fields
else:
if by_name:
return user_given_fields
else:
input_fields = []
for name in user_given_fields:
try:
input_fields.append(fields.field_id(name))
except ValueError, exc:
sys.exit(exc)
return input_fields
def utf8(text):
"""Encodes using the global FILE_ENCODING
"""
return text.encode(FILE_ENCODING)
def update_attributes(updatable_attributes, new_attributes, by_column=False,
fields=None):
"""Correctly merging the "fields" attribute substructure in updates.
updatable_attributes: previous attributes to be updated
new_attributes: updates to be added
by_column: set to True is keys are the column position of the field
fields: Fields object info
"""
if new_attributes:
fields_substructure = updatable_attributes.get("fields", {})
field_attributes = new_attributes.get("fields", {})
if field_attributes and (not by_column or fields):
for field_key, value in field_attributes.items():
field_id = (field_key if not by_column
else fields.field_id(field_key))
if not field_id in fields_substructure.keys():
fields_substructure.update({field_id: {}})
fields_substructure[field_id].update(value)
updatable_attributes.update({"fields": fields_substructure})
else:
updatable_attributes.update(new_attributes)
def update_json_args(resource_attributes, json_attributes, fields=None):
"""Updating the resource attributes with the contents of a JSON file
"""
if fields is not None:
# transforms the fields structure changes if columns are used as keys
json_attributes = transform_fields_keys(json_attributes, fields)
update_attributes(resource_attributes, json_attributes)
def relative_input_fields(fields, user_given_fields):
"""Returns the user given input fields using relative syntax
"""
input_fields = []
if all([(name[0] in ADD_REMOVE_PREFIX) for name in user_given_fields]):
return user_given_fields
preferred_fields = fields.preferred_fields()
for field_id in preferred_fields.keys():
name = fields.fields[field_id]['name']
if not name in user_given_fields:
input_fields.append("%s%s" % (REMOVE_PREFIX, name))
for name in user_given_fields:
try:
field_id = fields.field_id(name)
except ValueError, exc:
sys.exit(exc)
input_fields.append("%s%s" % (ADD_PREFIX, name))
return input_fields
def wait_for_available_tasks(inprogress, max_parallel, api,
resource_type, wait_step=2):
"""According to the max_parallel number of parallel resources to be
created, when the number of in progress resources reaches the limit,
it checks the ones in inprogress to see if there's a
FINISHED or FAULTY resource. If found, it is removed from the
inprogress list and returns to allow another one to be created.
"""
check_kwargs = {"retries": 0, "query_string": "full=false", "api": api}
while len(inprogress) == max_parallel:
for j in range(0, len(inprogress)):
try:
ready = check_resource(inprogress[j], **check_kwargs)
status = bigml.api.get_status(ready)
if status['code'] == bigml.api.FINISHED:
del inprogress[j]
return
elif status['code'] == bigml.api.FAULTY:
raise ValueError(status['message'])
except ValueError, exception:
sys.exit("Failed to get a finished %s: %s" %
(resource_type, str(exception)))
time.sleep(max_parallel * wait_step)
def check_fields_struct(update_args, resource_type):
"""In case the args to update have a `fields` attribute, it checks the
structure in this attribute and removes the attributes for each field
that will not be accepted by the API.
"""
if "fields" in update_args:
fields_substr = update_args.get("fields")
for _, field in fields_substr.items():
attributes = field.keys()
for attribute in attributes:
if not attribute in VALID_FIELD_ATTRIBUTES.get(resource_type):
del field[attribute]
def set_basic_args(args, name):
"""Sets the basic arguments, common to all resources
"""
return {
"name": name,
"description": args.description_,
"category": args.category,
"tags": args.tag}
def set_basic_model_args(args, name):
"""Sets the additional args common to all models
"""
model_args = set_basic_args(args, name)
if args.default_numeric_value is not None:
model_args.update({ \
"default_numeric_value": args.default_numeric_value})
return model_args
def set_basic_batch_args(args, name):
"""Sets the additional args common to all batch resources
"""
batch_args = set_basic_args(args, name)
header = (hasattr(args, "prediction_header") and args.prediction_header) \
or (hasattr(args, "projection_header") and args.projection_header)
batch_args.update({ \
"header": header,
"output_dataset": args.to_dataset
})
return batch_args
def set_source_args(args, name=None, multi_label_data=None,
data_set_header=None, fields=None):
"""Returns a source arguments dict
"""
if name is None:
name = args.name
source_args = set_basic_args(args, name)
if args.project_id is not None:
source_args.update({"project": args.project_id})
# if header is set, use it
if data_set_header is not None:
source_args.update({"source_parser": {"header": data_set_header}})
# If user has given an OS locale, try to add the locale used in bigml.com
if args.user_locale is not None:
source_locale = bigml_locale(args.user_locale)
if source_locale is None:
log_message("WARNING: %s locale equivalence not found."
" Using %s instead.\n" % (args.user_locale,
LOCALE_DEFAULT),
log_file=None, console=True)
source_locale = LOCALE_DEFAULT
source_args.update({'source_parser': {}})
source_args["source_parser"].update({'locale': source_locale})
# If user has set a training separator, use it.
if args.training_separator is not None:
training_separator = decode2(args.training_separator,
encoding="string_escape")
source_args["source_parser"].update({'separator': training_separator})
# If uploading a multi-label file, add the user_metadata info needed to
# manage the multi-label fields
if (hasattr(args, 'multi_label') and args.multi_label
and multi_label_data is not None):
source_args.update({
"user_metadata": {
"multi_label_data": multi_label_data}})
# to update fields attributes or types you must have a previous fields
# structure (at update time)
if fields:
if args.field_attributes_:
update_attributes(source_args,
{"fields": args.field_attributes_},
by_column=True, fields=fields)
if args.types_:
update_attributes(source_args,
{"fields": args.types_},
by_column=True, fields=fields)
if args.import_fields:
fields_struct = fields.new_fields_structure(args.import_fields)
check_fields_struct(fields_struct, "source")
update_attributes(source_args, fields_struct)
if 'source' in args.json_args:
update_json_args(source_args, args.json_args.get('source'), fields)
return source_args
def create_source(data_set, source_args, args, api=None, path=None,
session_file=None, log=None, source_type=None):
"""Creates remote source
"""
if api is None:
api = bigml.api.BigML()
suffix = "" if source_type is None else "%s " % source_type
message = dated("Creating %ssource.\n" % suffix)
log_message(message, log_file=session_file, console=args.verbosity)
check_fields_struct(source_args, "source")
source = api.create_source(data_set, source_args,
progress_bar=args.progress_bar)
if path is not None:
suffix = "_" + source_type if source_type else ""
log_created_resources(
"source%s" % suffix, path,
source['resource'], mode='a',
comment=("%s\n" % source['object']['name']))
source_id = check_resource_error(source, "Failed to create source: ")
try:
source = check_resource(source, api.get_source,
query_string=ALL_FIELDS_QS)
except ValueError, exception:
sys.exit("Failed to get a finished source: %s" % str(exception))
message = dated("Source created: %s\n" % get_url(source))
log_message(message, log_file=session_file, console=args.verbosity)
log_message("%s\n" % source_id, log_file=log)
if args.reports:
report(args.reports, path, source)
return source
def data_to_source(args):
"""Extracts the flags info to create a source object
"""
data_set = None
data_set_header = None
if (args.training_set and not args.source and not args.dataset and
not args.has_models_):
data_set = args.training_set
data_set_header = args.train_header
elif (hasattr(args, 'evaluate') and args.evaluate and args.test_set
and not args.source):
data_set = args.test_set
data_set_header = args.test_header
return data_set, data_set_header
def get_source(source, api=None, verbosity=True,
session_file=None):
"""Retrieves the source in its actual state and its field info
"""
if api is None:
api = bigml.api.BigML()
if (isinstance(source, basestring) or
bigml.api.get_status(source)['code'] != bigml.api.FINISHED):
message = dated("Retrieving source. %s\n" %
get_url(source))
log_message(message, log_file=session_file,
console=verbosity)
try:
source = check_resource(source, api.get_source,
query_string=ALL_FIELDS_QS)
except ValueError, exception:
sys.exit("Failed to get a finished source: %s" % str(exception))
return source
def update_source(source, source_args, args,
api=None, session_file=None):
"""Updates source properties
"""
if api is None:
api = bigml.api.BigML()
message = dated("Updating source. %s\n" %
get_url(source))
log_message(message, log_file=session_file,
console=args.verbosity)
source = api.update_source(source, source_args)
check_resource_error(source, "Failed to update source: ")
source = check_resource(source, api.get_source)
return source
def set_basic_dataset_args(args, name=None):
"""Return dataset basic arguments dict
"""
if name is None:
name = args.name
dataset_args = set_basic_args(args, name)
if args.sample_rate != 1 and args.no_model:
dataset_args.update({
"seed": SEED if args.seed is None else args.seed,
"sample_rate": args.sample_rate
})
if hasattr(args, "range") and args.range_:
dataset_args.update({
"range": args_range
})
return dataset_args
def set_dataset_args(args, fields, multi_label_data=None):
"""Return dataset arguments dict
"""
dataset_args = set_basic_dataset_args(args)
objective_field = (None if not hasattr(args, 'objective_field')
else args.objective_field)
if multi_label_data is not None and objective_field is None:
objective_field = multi_label_data['objective_name']
if objective_field is not None and fields is not None:
try:
objective_id = fields.field_id(objective_field)
except ValueError, exc:
sys.exit(exc)
dataset_args.update(objective_field={'id': objective_id})
if hasattr(args, 'juxtapose') and args.juxtapose:
dataset_args.update({"juxtapose": args.juxtapose})
if hasattr(args, 'sql_query') and args.sql_query:
dataset_args.update({"sql_query": args.sql_query})
if hasattr(args, 'sql_output_fields_') and args.sql_output_fields_:
dataset_args.update({"sql_output_fields": args.sql_output_fields_})
if hasattr(args, 'json_query_') and args.json_query_:
dataset_args.update({"json_query": args.json_query_})
if args.json_filter:
dataset_args.update(json_filter=args.json_filter)
elif args.lisp_filter:
dataset_args.update(lisp_filter=args.lisp_filter)
if args.dataset_fields_ and fields is not None:
input_fields = configure_input_fields(fields, args.dataset_fields_)
dataset_args.update(input_fields=input_fields)
if (hasattr(args, 'multi_label') and args.multi_label
and multi_label_data is not None):
dataset_args.update(
user_metadata={'multi_label_data': multi_label_data})
if fields and args.import_fields:
fields_struct = fields.new_fields_structure(args.import_fields)
check_fields_struct(fields_struct, "dataset")
update_attributes(dataset_args, fields_struct)
if 'dataset' in args.json_args:
update_json_args(dataset_args, args.json_args.get('dataset'), fields)
return dataset_args
def set_dataset_split_args(name, description, args, sample_rate=1,
out_of_bag=False, multi_label_data=None):
"""Return dataset arguments dict to split a dataset
"""
dataset_args = {
"name": name,
"description": description,
"category": args.category,
"tags": args.tag,
"seed": SEED if args.seed is None else args.seed,
"sample_rate": sample_rate,
"out_of_bag": out_of_bag
}
if hasattr(args, "range") and args.range_:
dataset_args.update({
"range": args_range
})
if (hasattr(args, "multi_label") and
args.multi_label and multi_label_data is not None):
dataset_args.update(
user_metadata={'multi_label_data': multi_label_data})
return dataset_args
def create_dataset(origin_resource, dataset_args, args, api=None,
path=None, session_file=None, log=None, dataset_type=None):
"""Creates remote dataset from source, dataset, cluster or datasets list
"""
if api is None:
api = bigml.api.BigML()
message = dated("Creating dataset.\n")
log_message(message, log_file=session_file, console=args.verbosity)
check_fields_struct(dataset_args, "dataset")
# if --json-query or --sql-query are used and no names are set for
# the datasets, we create default naming to A, B, C, etc. for the datasets
# to be used as origin
if ((hasattr(args, 'sql_query') and args.sql_query) or \
(hasattr(args, 'json_query') and args.sql_query)) and \
isinstance(origin_resource, list) and \
((not isinstance(origin_resource[0], dict)) or \
origin_resource[0].get("name") is None):
for index, element in enumerate(origin_resource):
if index < len(DS_NAMES):
if isinstance(element, dict):
if element.get("resource") is not None:
element = {"id": element["resource"]}
element.update({"name": DS_NAMES[index]})
origin_resource[index] = element
elif isinstance(element, basestring):
origin_resource[index] = {"id": element,
"name": DS_NAMES[index]}
dataset = api.create_dataset(origin_resource, dataset_args, retries=None)
suffix = "_" + dataset_type if dataset_type else ""
log_created_resources("dataset%s" % suffix, path,
bigml.api.get_dataset_id(dataset), mode='a')
dataset_id = check_resource_error(dataset, "Failed to create dataset: ")
try:
dataset = check_resource(dataset, api.get_dataset,
query_string=ALL_FIELDS_QS)
except ValueError, exception:
sys.exit("Failed to get a finished dataset: %s" % str(exception))
message = dated("Dataset created: %s\n" % get_url(dataset))
log_message(message, log_file=session_file, console=args.verbosity)
log_message("%s\n" % dataset_id, log_file=log)
if args.reports:
report(args.reports, path, dataset)
return dataset
def get_dataset(dataset, api=None, verbosity=True, session_file=None):
"""Retrieves the dataset in its actual state
"""
if api is None:
api = bigml.api.BigML()
if (isinstance(dataset, basestring) or
bigml.api.get_status(dataset)['code'] != bigml.api.FINISHED):
message = dated("Retrieving dataset. %s\n" %
get_url(dataset))
log_message(message, log_file=session_file,
console=verbosity)
dataset = check_resource(dataset, api.get_dataset,
query_string=ALL_FIELDS_QS)
check_resource_error(dataset, "Failed to get dataset: ")
return dataset
def publish_dataset(dataset, args, api=None, session_file=None):
"""Publishes dataset and sets its price (if any)
"""
if api is None:
api = bigml.api.BigML()
public_dataset = {"private": False}
if args.dataset_price:
public_dataset.update(price=args.dataset_price)
dataset = update_dataset(dataset, public_dataset, args, api=api,
session_file=session_file)
check_resource_error(dataset, "Failed to update dataset: ")
dataset = check_resource(dataset, api.get_dataset,
query_string=ALL_FIELDS_QS)
return dataset
def update_dataset(dataset, dataset_args, args,
api=None, path=None, session_file=None):
"""Updates dataset properties
"""
if api is None:
api = bigml.api.BigML()
message = dated("Updating dataset. %s\n" %
get_url(dataset))
log_message(message, log_file=session_file,
console=args.verbosity)
dataset = api.update_dataset(dataset, dataset_args)
if is_shared(dataset):
message = dated("Shared dataset link. %s\n" %
get_url(dataset, shared=True))
log_message(message, log_file=session_file,
console=args.verbosity)
if args.reports:
report(args.reports, path, dataset)
check_resource_error(dataset, "Failed to update dataset: ")
dataset = check_resource(dataset, api.get_dataset,
query_string=ALL_FIELDS_QS)
return dataset
def set_model_args(args, name=None, objective_id=None, fields=None,
                   model_fields=None, other_label=None):
    """Return model arguments dict

    ``args``: parsed bigmler command-line flags
    ``name``: model name (defaults to args.name)
    ``objective_id``: objective field id (derived from args when None)
    ``fields``: Fields object describing the training dataset
    ``model_fields``: subset of fields to use as model inputs
    ``other_label``: grouping label used with --max-categories

    Note: this function also mutates ``args`` (sample_rate, replacement)
    when cross-validation or evaluation sampling applies.
    """
    if name is None:
        name = args.name
    if objective_id is None and args.max_categories is None:
        objective_id = args.objective_id_
    # NOTE(review): with --max-categories the raw objective_field (as
    # given by the user) is used instead of the resolved id — confirm
    # this asymmetry is intended.
    if args.max_categories > 0:
        objective_id = args.objective_field
    if model_fields is None:
        model_fields = args.model_fields_
    model_args = set_basic_model_args(args, name)
    model_args.update({"missing_splits": args.missing_splits})
    if objective_id is not None and fields is not None:
        model_args.update({"objective_field": objective_id})
    # If evaluate flag is on and no test_split flag is provided,
    # we choose a deterministic sampling with
    # args.sample_rate (80% by default) of the data to create the model
    # If cross_validation_rate = n/100, then we choose to run 2 * n evaluations
    # by holding out a n% of randomly sampled data.
    if ((args.evaluate and args.test_split == 0 and args.test_datasets is None)
            or args.cross_validation_rate > 0):
        model_args.update(seed=SEED)
        if args.cross_validation_rate > 0:
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
              and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE
    if model_fields and fields is not None:
        # field names are used (by_name) when --max-categories is set
        input_fields = configure_input_fields(
            fields, model_fields, by_name=(args.max_categories > 0))
        model_args.update(input_fields=input_fields)
    if args.pruning and args.pruning != 'smart':
        model_args.update(stat_pruning=(args.pruning == 'statistical'))
    if args.node_threshold > 0:
        model_args.update(node_threshold=args.node_threshold)
    if args.balance:
        model_args.update(balance_objective=True)
    if args.split_field:
        model_args.update(split_field=args.split_field)
    if args.focus_field:
        model_args.update(focus_field=args.focus_field)
    if args.weight_field:
        try:
            weight_field = fields.field_id(args.weight_field)
        except ValueError, exc:
            sys.exit(exc)
        model_args.update(weight_field=weight_field)
    if args.objective_weights:
        model_args.update(objective_weights=args.objective_weights_json)
    if args.max_categories > 0:
        model_args.update(
            user_metadata={'other_label': other_label,
                           'max_categories': args.max_categories})
    model_args = update_sample_parameters_args(model_args, args)
    # user-provided JSON (--model-attributes) is applied last, overriding
    # anything set above
    if 'model' in args.json_args:
        update_json_args(model_args, args.json_args.get('model'), fields)
    return model_args
def set_label_model_args(args, fields, labels, multi_label_data):
    """Set of args needed to build a model per label

    ``args``: parsed bigmler flags
    ``fields``: Fields object of the training dataset
    ``labels``: list of labels, one model will be built per label
    ``multi_label_data``: multi-label metadata stored in the models
    """
    objective_field = args.objective_field
    if not args.model_fields_:
        model_fields = []
    else:
        model_fields = relative_input_fields(fields, args.model_fields_)
    if objective_field is None:
        objective_field = fields.objective_field
    try:
        objective_id = fields.field_id(objective_field)
        objective_field = fields.field_name(objective_id)
    except ValueError, exc:
        sys.exit(exc)
    all_labels = get_all_labels(multi_label_data)
    model_args_list = []
    # labels are traversed in reverse index order
    for index in range(args.number_of_models - 1, -1, -1):
        label = labels[index]
        (new_name, label_field, single_label_fields) = label_model_args(
            args.name, label, all_labels, model_fields,
            objective_field)
        model_args = set_model_args(args, name=new_name,
                                    objective_id=label_field, fields=fields,
                                    model_fields=single_label_fields)
        if multi_label_data is not None:
            model_args.update(
                user_metadata={'multi_label_data': multi_label_data})
        model_args_list.append(model_args)
    return model_args_list
def create_models(datasets, model_ids, model_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Create remote models

    ``datasets``: dataset ids/objects used as training data
    ``model_ids``: ids of models already created (when resuming)
    ``model_args``: creation arguments dict, or a list of dicts (one
                    per model to create)
    ``args``: parsed bigmler flags
    ``api``: BigML API connection (created on demand when None)
    ``path``: directory used for the bookkeeping files
    ``session_file``: session log file
    ``log``: user-provided log file
    """
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_models:]
    # if resuming and all models were created, there will be no datasets left
    if datasets:
        dataset = datasets[0]
        if isinstance(model_args, list):
            model_args_list = model_args
        if args.number_of_models > 0:
            message = dated("Creating %s.\n" %
                            plural("model", args.number_of_models))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            single_model = args.number_of_models == 1 and existing_models == 0
            # if there's more than one model the first one must contain
            # the entire field structure to be used as reference.
            query_string = (FIELDS_QS if single_model and (args.test_header \
                and not args.export_fields) else ALL_FIELDS_QS)
            inprogress = []
            for i in range(0, args.number_of_models):
                # throttle creation to args.max_parallel_models at a time
                wait_for_available_tasks(inprogress, args.max_parallel_models,
                                         api, "model")
                if model_args_list:
                    model_args = model_args_list[i]
                if args.cross_validation_rate > 0:
                    new_seed = get_basic_seed(i + existing_models)
                    model_args.update(seed=new_seed)
                # one model per dataset (--max-categories or single model)
                if (args.max_categories > 0 or
                        (args.test_datasets and args.evaluate)):
                    dataset = datasets[i]
                    model = api.create_model(dataset, model_args, retries=None)
                elif args.dataset_off and args.evaluate:
                    # leave the i-th dataset out of the training collection
                    multi_dataset = args.test_dataset_ids[:]
                    del multi_dataset[i + existing_models]
                    model = api.create_model(multi_dataset, model_args,
                                             retries=None)
                else:
                    model = api.create_model(datasets, model_args,
                                             retries=None)
                model_id = check_resource_error(model,
                                                "Failed to create model: ")
                log_message("%s\n" % model_id, log_file=log)
                model_ids.append(model_id)
                inprogress.append(model_id)
                models.append(model)
                log_created_resources("models", path, model_id, mode='a')
        if args.number_of_models < 2 and args.verbosity:
            # single-model case: wait for it to finish before reporting
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s\n" %
                            get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, model)
    return models, model_ids
def create_model(cluster, model_args, args, api=None,
path=None, session_file=None, log=None, model_type=None):
"""Creates remote model from cluster and centroid
"""
if api is None:
api = bigml.api.BigML()
message = dated("Creating model.\n")
log_message(message, log_file=session_file, console=args.verbosity)
model = api.create_model(cluster, model_args, retries=None)
suffix = "" if model_type is None else "_%s" % model_type
log_created_resources("models%s" % suffix, path,
bigml.api.get_model_id(model), mode='a')
model_id = check_resource_error(model, "Failed to create model: ")
try:
model = check_resource(model, api.get_model,
query_string=ALL_FIELDS_QS)
except ValueError, exception:
sys.exit("Failed to get a finished model: %s" % str(exception))
message = dated("Model created: %s\n" % get_url(model))
log_message(message, log_file=session_file, console=args.verbosity)
log_message("%s\n" % model_id, log_file=log)
if args.reports:
report(args.reports, path, model)
return model
def update_model(model, model_args, args,
                 api=None, path=None, session_file=None):
    """Applies ``model_args`` to a remote model and returns the
       updated, finished resource.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating model. %s\n" % get_url(model)),
                log_file=session_file, console=args.verbosity)
    model = api.update_model(model, model_args)
    check_resource_error(model, "Failed to update model: %s"
                         % model['resource'])
    model = check_resource(model, api.get_model, query_string=ALL_FIELDS_QS)
    if is_shared(model):
        shared_message = dated("Shared model link. %s\n" %
                               get_url(model, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, model)
    return model
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in its actual status

    When fewer ids than ``args.max_batch_models`` are given, every
    model is fetched; otherwise only the first one is retrieved, to be
    used as the field structure reference.
    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    single_model = len(model_ids) == 1
    if single_model:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                # if there's more than one model the first one must contain
                # the entire field structure to be used as reference.
                query_string = (
                    ALL_FIELDS_QS if (
                        (not single_model and (
                            len(models) == 0 or args.multi_label)) or
                        not args.test_header)
                    else FIELDS_QS)
                model = check_resource(model, api.get_model,
                                       query_string=query_string)
            except ValueError, exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
    else:
        # too many models to fetch them all: retrieve only the first one
        try:
            query_string = (ALL_FIELDS_QS if not single_model or
                            not args.test_header
                            else FIELDS_QS)
            model = check_resource(model_ids[0], api.get_model,
                                   query_string=query_string)
        except ValueError, exception:
            sys.exit("Failed to get a finished model: %s" % str(exception))
        models[0] = model
    return models, model_ids
def set_label_ensemble_args(args, labels, multi_label_data,
                            number_of_ensembles, fields):
    """Set of args needed to build an ensemble per label

    ``labels``: list of labels, one ensemble will be built per label
    ``multi_label_data``: multi-label metadata stored in the ensembles
    ``number_of_ensembles``: how many ensembles will be created
    ``fields``: Fields object of the training dataset
    """
    if not args.model_fields_:
        args.model_fields_ = relative_input_fields(fields, args.model_fields_)
    if args.objective_field is None:
        args.objective_field = fields.objective_field
    try:
        objective_id = fields.field_id(args.objective_field)
    except ValueError, exc:
        sys.exit(exc)
    objective_field = fields.fields[objective_id]['name']
    ensemble_args_list = []
    # labels are traversed in reverse index order
    for index in range(number_of_ensembles - 1, -1, -1):
        label = labels[index]
        all_labels = get_all_labels(multi_label_data)
        (new_name, label_field, single_label_fields) = label_model_args(
            args.name, label, all_labels, args.model_fields_,
            objective_field)
        ensemble_args = set_ensemble_args(args, name=new_name,
                                          objective_id=label_field,
                                          model_fields=single_label_fields,
                                          fields=fields)
        if multi_label_data is not None:
            ensemble_args.update(
                user_metadata={'multi_label_data': multi_label_data})
        ensemble_args_list.append(ensemble_args)
    return ensemble_args_list
def set_ensemble_args(args, name=None,
                      objective_id=None, model_fields=None, fields=None):
    """Return ensemble arguments dict

    ``args``: parsed bigmler flags
    ``name``: ensemble name (defaults to args.name)
    ``objective_id``: objective field id (defaults to args.objective_id_)
    ``model_fields``: subset of fields used as inputs
    ``fields``: Fields object of the training dataset
    """
    if name is None:
        name = args.name
    if objective_id is None:
        objective_id = args.objective_id_
    if model_fields is None:
        model_fields = args.model_fields_
    ensemble_args = set_basic_model_args(args, name)
    ensemble_args.update({
        "missing_splits": args.missing_splits,
        "ensemble_sample": {"seed": SEED if args.ensemble_sample_seed is None \
            else args.ensemble_sample_seed},
        "seed": SEED if args.seed is None else args.seed
    })
    if objective_id is not None and fields is not None:
        ensemble_args.update({"objective_field": objective_id})
    if args.boosting:
        # forward every boosting-related flag that was actually set
        boosting_args = {}
        for option in BOOSTING_OPTIONS:
            if hasattr(args, option) and getattr(args, option) is not None:
                boosting_args.update({option: getattr(args, option)})
        ensemble_args.update({"boosting": boosting_args})
    else:
        ensemble_args.update({"number_of_models": args.number_of_models})
    # If evaluate flag is on and no test_split flag is provided,
    # we choose a deterministic sampling with
    # args.sample_rate (80% by default) of the data to create the model
    if (args.evaluate and args.test_split == 0 and
            args.test_datasets is None and not args.dataset_off):
        ensemble_args.update({"seed": SEED})
        if args.sample_rate == 1:
            args.sample_rate = EVALUATE_SAMPLE_RATE
    if model_fields and fields is not None:
        input_fields = configure_input_fields(fields, model_fields)
        ensemble_args.update(input_fields=input_fields)
    if args.pruning and args.pruning != 'smart':
        ensemble_args.update(stat_pruning=(args.pruning == 'statistical'))
    if args.node_threshold > 0:
        ensemble_args.update(node_threshold=args.node_threshold)
    if args.balance:
        ensemble_args.update(balance_objective=True)
    if args.weight_field:
        try:
            weight_field = fields.field_id(args.weight_field)
        except ValueError, exc:
            sys.exit(exc)
        ensemble_args.update(weight_field=weight_field)
    if args.objective_weights:
        ensemble_args.update(objective_weights=args.objective_weights_json)
    if args.random_candidates:
        ensemble_args.update(random_candidates=args.random_candidates)
    update_attributes(ensemble_args, args.json_args.get('model'))
    ensemble_args = update_sample_parameters_args(ensemble_args, args)
    # the ensemble-level sample rate/replacement is set after the
    # model-level sample parameters are merged in
    ensemble_args["ensemble_sample"].update( \
        {"rate": args.ensemble_sample_rate,
         "replacement": args.ensemble_sample_replacement})
    if 'ensemble' in args.json_args:
        update_json_args(ensemble_args, args.json_args.get('ensemble'), fields)
    return ensemble_args
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1,
                     api=None, path=None, session_file=None, log=None):
    """Create ensembles from input data

    ``ensemble_ids`` may already hold ids created in a previous run
    (resume); only the remaining datasets are used then.
    Returns the ensembles, their ids, and the models they contain.
    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    existing_ensembles = len(ensembles)
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_ensembles:]
    # NOTE(review): if number_of_ensembles == 0, ``models`` is never
    # bound and the final return would raise NameError — confirm callers
    # always pass a positive value.
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_ensembles):
            # throttle creation to args.max_parallel_ensembles at a time
            wait_for_available_tasks(inprogress, args.max_parallel_ensembles,
                                     api, "ensemble",
                                     wait_step=args.number_of_models)
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            if args.dataset_off and args.evaluate:
                # leave the i-th dataset out of the training collection
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_ensembles]
                ensemble = api.create_ensemble(multi_dataset,
                                               ensemble_args,
                                               retries=None)
            else:
                ensemble = api.create_ensemble(datasets, ensemble_args,
                                               retries=None)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s\n" %
                            get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)
    return ensembles, ensemble_ids, models, model_ids
def retrieve_ensembles_models(ensembles, api, path=None):
    """Retrieves the models associated to a list of ensembles

    Waits for each unfinished ensemble, collects its model ids and
    fully retrieves the first model (used as the field reference).
    Mutates ``ensembles`` in place with the finished resources.
    """
    models = []
    model_ids = []
    for index in range(0, len(ensembles)):
        ensemble = ensembles[index]
        if (isinstance(ensemble, basestring) or
                bigml.api.get_status(ensemble)['code'] != bigml.api.FINISHED):
            try:
                ensemble = check_resource(ensemble, api.get_ensemble)
                ensembles[index] = ensemble
            except ValueError, exception:
                sys.exit("Failed to get a finished ensemble: %s" %
                         str(exception))
        model_ids.extend(ensemble['object']['models'])
    if path is not None:
        for model_id in model_ids:
            log_created_resources("models", path, model_id, mode='a')
    models = model_ids[:]
    # only the first model is retrieved with its full field structure
    models[0] = check_resource(models[0], api.get_model,
                               query_string=ALL_FIELDS_QS)
    return models, model_ids
def get_ensemble(ensemble, api=None, verbosity=True, session_file=None):
    """Fetches the remote ensemble, waiting until it reaches a
       finished state.
    """
    if api is None:
        api = bigml.api.BigML()
    unfinished = (isinstance(ensemble, basestring) or
                  bigml.api.get_status(ensemble)['code'] !=
                  bigml.api.FINISHED)
    if unfinished:
        log_message(dated("Retrieving ensemble. %s\n" % get_url(ensemble)),
                    log_file=session_file, console=verbosity)
        ensemble = check_resource(ensemble, api.get_ensemble)
        check_resource_error(ensemble, "Failed to get ensemble: ")
    return ensemble
def set_publish_model_args(args):
    """Builds the update arguments used to publish a model.

    Black-box and white-box publishing both clear the private flag;
    white-box additionally marks the model as white-box. A price and
    per-prediction credits are attached when given.
    """
    public_model = {}
    if args.black_box:
        public_model = {"private": False}
    if args.white_box:
        public_model = {"private": False, "white_box": True}
    if args.model_price:
        public_model["price"] = args.model_price
    if args.cpp:
        public_model["credits_per_prediction"] = args.cpp
    return public_model
def map_fields(fields_map, model_fields, dataset_fields):
    """Build a dict to map model to dataset fields

    ``fields_map``: dict mapping model columns to dataset columns
                    (names or ids)
    ``model_fields``: Fields object for the model
    ``dataset_fields``: Fields object for the dataset

    Returns a dict of model field id -> dataset field id. Exits the
    program when a column cannot be resolved to a field id.
    """
    update_map = {}
    # .items() (not .iteritems()) keeps this Python 2/3 compatible
    for (model_column, dataset_column) in fields_map.items():
        try:
            update_map[model_fields.field_id(model_column)] = \
                dataset_fields.field_id(dataset_column)
        except ValueError as exc:
            sys.exit(exc)
    return update_map
def set_evaluation_args(args, fields=None,
                        dataset_fields=None, name=None):
    """Return evaluation args dict

    ``args``: parsed bigmler flags
    ``fields``: Fields object of the model
    ``dataset_fields``: Fields object of the dataset (defaults to
                        ``fields``)
    ``name``: evaluation name (defaults to args.name)

    Note: may mutate ``args`` (range_, sample_rate) for the sampling
    cases handled at the end of the function.
    """
    if name is None:
        name = args.name
    evaluation_args = set_basic_args(args, name)
    if hasattr(args, 'method') and (args.number_of_models > 1
                                    or args.ensemble):
        evaluation_args.update(combiner=args.method)
    if hasattr(args, 'method') and args.method:
        evaluation_args.update({"combiner": args.method})
        if args.method == THRESHOLD_CODE:
            threshold = {}
            if hasattr(args, 'threshold') and args.threshold is not None:
                threshold.update(k=args.threshold)
            if hasattr(args, 'threshold_class') \
                    and args.threshold_class is not None:
                threshold.update({"class": args.threshold_class})
            evaluation_args.update(threshold=threshold)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        evaluation_args.update({"fields_map": map_fields(args.fields_map_,
                                                         fields,
                                                         dataset_fields)})
    if hasattr(args, 'missing_strategy') and args.missing_strategy:
        evaluation_args.update(missing_strategy=args.missing_strategy)
    if 'evaluation' in args.json_args:
        update_json_args(
            evaluation_args, args.json_args.get('evaluation'), fields)
    # if evaluating time series we need to use ranges
    if args.subcommand == "time-series" and args.test_split == 0 and \
            not args.has_test_datasets_:
        args.range_ = [int(args.max_rows * EVALUATE_SAMPLE_RATE) + 1,
                       args.max_rows]
        evaluation_args.update({"range": args.range_})
        return evaluation_args
    # Two cases to use out_of_bag and sample_rate: standard evaluations where
    # only the training set is provided, and cross_validation
    # [--dataset|--test] [--model|--models|--model-tag|--ensemble] --evaluate
    if (((hasattr(args, "dataset") and args.dataset) or args.test_set)
            and args.has_supervised_):
        return evaluation_args
    # [--train|--dataset] --test-split --evaluate
    if args.test_split > 0 and (args.training_set or args.dataset):
        return evaluation_args
    # --datasets --test-datasets or equivalents
    #if args.datasets and (args.test_datasets or args.dataset_off):
    if args.has_datasets_ and (args.has_test_datasets_ or args.dataset_off):
        return evaluation_args
    # fall-through: evaluate on the out-of-bag sample of the training data
    if args.sample_rate == 1:
        args.sample_rate = EVALUATE_SAMPLE_RATE
    evaluation_args.update(out_of_bag=True, seed=SEED,
                           sample_rate=args.sample_rate)
    return evaluation_args
def set_label_evaluation_args(args, labels, all_labels,
                              number_of_evaluations, fields, dataset_fields,
                              objective_field):
    """Set of args needed to build an evaluation per label

    ``labels``: list of labels, one evaluation will be built per label
    ``all_labels``: complete list of labels (for name generation)
    ``number_of_evaluations``: how many evaluations will be created
    """
    if objective_field is None:
        try:
            objective_id = fields.field_id(fields.objective_field)
        except ValueError, exc:
            sys.exit(exc)
        objective_field = fields.fields[objective_id]['name']
    evaluation_args_list = []
    # labels are traversed in reverse index order
    for index in range(number_of_evaluations - 1, -1, -1):
        label = labels[index]
        new_name = label_model_args(
            args.name, label, all_labels, [], objective_field)[0]
        evaluation_args = set_evaluation_args(args,
                                              fields=fields,
                                              dataset_fields=dataset_fields,
                                              name=new_name)
        evaluation_args_list.append(evaluation_args)
    return evaluation_args_list
def create_evaluations(model_or_ensemble_ids, datasets, evaluation_args,
                       args, api=None,
                       path=None, session_file=None, log=None,
                       existing_evaluations=0):
    """Create evaluations for a list of models

    ``model_or_ensemble_ids``: list of model or ensemble ids to create
                               an evaluation of
    ``datasets``: dataset objects or ids to evaluate with
    ``evaluation_args``: arguments for the ``create_evaluation`` call
    ``args``: input values for bigmler flags
    ``api``: api to remote objects in BigML
    ``path``: directory to store the BigMLer generated files in
    ``session_file``: file to store the messages of that session
    ``log``: user provided log file
    ``existing_evaluations``: evaluations found when attempting resume
    """
    evaluations = []
    dataset = datasets[0]
    evaluation_args_list = []
    if isinstance(evaluation_args, list):
        evaluation_args_list = evaluation_args
    if api is None:
        api = bigml.api.BigML()
    # skip the ids already evaluated in a previous (resumed) run
    remaining_ids = model_or_ensemble_ids[existing_evaluations:]
    if args.test_dataset_ids or args.dataset_off:
        remaining_datasets = datasets[existing_evaluations:]
    number_of_evaluations = len(remaining_ids)
    message = dated("Creating evaluations.\n")
    log_message(message, log_file=session_file,
                console=args.verbosity)
    inprogress = []
    for i in range(0, number_of_evaluations):
        model = remaining_ids[i]
        if args.test_dataset_ids or args.dataset_off:
            # one test dataset per model when they are paired
            dataset = remaining_datasets[i]
        # throttle creation to args.max_parallel_evaluations at a time
        wait_for_available_tasks(inprogress, args.max_parallel_evaluations,
                                 api, "evaluation")
        if evaluation_args_list != []:
            evaluation_args = evaluation_args_list[i]
        if args.cross_validation_rate > 0:
            new_seed = get_basic_seed(i + existing_evaluations)
            evaluation_args.update(seed=new_seed)
        evaluation = api.create_evaluation(model, dataset, evaluation_args,
                                           retries=None)
        evaluation_id = check_resource_error(evaluation,
                                             "Failed to create evaluation: ")
        inprogress.append(evaluation_id)
        log_created_resources("evaluations", path, evaluation_id,
                              mode='a')
        evaluations.append(evaluation)
        log_message("%s\n" % evaluation['resource'], log_file=log)
    if (args.number_of_evaluations < 2 and len(evaluations) == 1
            and args.verbosity):
        # single-evaluation case: wait for it to finish before reporting
        evaluation = evaluations[0]
        if bigml.api.get_status(evaluation)['code'] != bigml.api.FINISHED:
            try:
                evaluation = check_resource(evaluation, api.get_evaluation)
            except ValueError, exception:
                sys.exit("Failed to get a finished evaluation: %s" %
                         str(exception))
            evaluations[0] = evaluation
        message = dated("Evaluation created: %s\n" %
                        get_url(evaluation))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, evaluation)
    return evaluations
def get_evaluation(evaluation, api=None, verbosity=True, session_file=None):
"""Retrieves evaluation in its actual state
"""
if api is None:
api = bigml.api.BigML()
message = dated("Retrieving evaluation. %s\n" %
get_url(evaluation))
log_message(message, log_file=session_file, console=verbosity)
try:
evaluation = check_resource(evaluation, api.get_evaluation)
except ValueError, exception:
sys.exit("Failed to get a finished evaluation: %s" % str(exception))
return evaluation
def update_evaluation(evaluation, evaluation_args, args,
                      api=None, path=None, session_file=None):
    """Applies ``evaluation_args`` to a remote evaluation and returns
       the updated, finished resource.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating evaluation. %s\n" % get_url(evaluation)),
                log_file=session_file, console=args.verbosity)
    evaluation = api.update_evaluation(evaluation, evaluation_args)
    check_resource_error(evaluation, "Failed to update evaluation: %s"
                         % evaluation['resource'])
    evaluation = check_resource(evaluation, api.get_evaluation)
    if is_shared(evaluation):
        shared_message = dated("Shared evaluation link. %s\n" %
                               get_url(evaluation, shared=True))
        log_message(shared_message, log_file=session_file,
                    console=args.verbosity)
        if args.reports:
            report(args.reports, path, evaluation)
    return evaluation
def save_evaluation(evaluation, output, api=None):
    """Writes the evaluation result as ``output``.txt and
       ``output``.json files.
    """
    if api is None:
        api = bigml.api.BigML()
    # unwrap the resource envelope down to the 'result' payload
    result = evaluation.get('object', evaluation).get('result', evaluation)
    save_txt_and_json(result, output, api=api)
def save_txt_and_json(object_dict, output, api=None):
    """Dumps a dict as JSON (``output``.json) and as pretty-printed
       text (``output``.txt).
    """
    open_mode = 'wt' if PYTHON3 else 'wb'
    serialized = json.dumps(object_dict)
    if not PYTHON3:
        serialized = utf8(serialized)
    with open(output + '.json', open_mode) as dict_json:
        dict_json.write(serialized)
    with open(output + '.txt', open_mode) as dict_txt:
        api.pprint(object_dict, dict_txt)
def set_batch_prediction_args(args, fields=None,
                              dataset_fields=None):
    """Return batch prediction args dict

    ``args``: parsed bigmler flags
    ``fields``: Fields object of the model
    ``dataset_fields``: Fields object of the test dataset (defaults to
                        ``fields``)
    """
    batch_prediction_args = set_basic_batch_args(args, args.name)
    if hasattr(args, 'method') and args.method:
        batch_prediction_args.update({"combiner": args.method})
        if args.method == THRESHOLD_CODE:
            threshold = {}
            if hasattr(args, 'threshold') and args.threshold is not None:
                threshold.update(k=args.threshold)
            if hasattr(args, 'threshold_class') \
                    and args.threshold_class is not None:
                threshold.update({"class": args.threshold_class})
            batch_prediction_args.update(threshold=threshold)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_prediction_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})
    if args.prediction_info in [NORMAL_FORMAT, FULL_FORMAT]:
        # boosted ensembles report probability instead of confidence
        if (hasattr(args, 'boosting') and args.boosting) or \
                (hasattr(args, 'probability') and args.probability):
            batch_prediction_args.update(probability=True)
        else:
            batch_prediction_args.update(confidence=True)
    if args.prediction_info == FULL_FORMAT:
        batch_prediction_args.update(all_fields=True)
        if hasattr(args, 'prediction_name') and args.prediction_name:
            batch_prediction_args.update(prediction_name=args.prediction_name)
    if args.prediction_fields:
        batch_prediction_args.update(all_fields=False)
        prediction_fields = []
        for field in args.prediction_fields.split(args.args_separator):
            field = field.strip()
            # accept field names as well as ids
            if not field in dataset_fields.fields:
                try:
                    field = dataset_fields.field_id(field)
                except ValueError, exc:
                    sys.exit(exc)
            prediction_fields.append(field)
        batch_prediction_args.update(output_fields=prediction_fields)
    if hasattr(args, 'missing_strategy') and args.missing_strategy:
        batch_prediction_args.update(missing_strategy=args.missing_strategy)
    if hasattr(args, "operating_point_") and args.operating_point_:
        batch_prediction_args.update(operating_point=args.operating_point_)
        if args.operating_point_.get("kind") == "probability":
            batch_prediction_args.update({"probability": True,
                                          "confidence": False})
    # user-provided JSON is applied last, overriding anything set above
    if 'batch_prediction' in args.json_args:
        update_json_args(
            batch_prediction_args,
            args.json_args.get('batch_prediction'),
            fields)
    return batch_prediction_args
def create_batch_prediction(model_or_ensemble, test_dataset,
batch_prediction_args, args,
api=None, session_file=None,
path=None, log=None):
"""Creates remote batch_prediction
"""
if api is None:
api = bigml.api.BigML()
message = dated("Creating batch prediction.\n")
log_message(message, log_file=session_file, console=args.verbosity)
batch_prediction = api.create_batch_prediction(model_or_ensemble,
test_dataset,
batch_prediction_args,
retries=None)
log_created_resources("batch_prediction", path,
bigml.api.get_batch_prediction_id(batch_prediction),
mode='a')
batch_prediction_id = check_resource_error(
batch_prediction, "Failed to create batch prediction: ")
try:
batch_prediction = check_resource(batch_prediction,
api.get_batch_prediction)
except ValueError, exception:
sys.exit("Failed to get a finished batch prediction: %s"
% str(exception))
message = dated("Batch prediction created: %s\n"
% get_url(batch_prediction))
log_message(message, log_file=session_file, console=args.verbosity)
log_message("%s\n" % batch_prediction_id, log_file=log)
if args.reports:
report(args.reports, path, batch_prediction)
return batch_prediction
def set_cluster_args(args, name=None, fields=None,
                     cluster_fields=None):
    """Return cluster arguments dict

    ``args``: parsed bigmler flags
    ``name``: cluster name (defaults to args.name)
    ``fields``: Fields object of the training dataset
    ``cluster_fields``: subset of fields used as inputs
    """
    if name is None:
        name = args.name
    if cluster_fields is None:
        cluster_fields = args.cluster_fields_
    cluster_args = set_basic_model_args(args, name)
    cluster_args.update({
        "seed": SEED if args.seed is None else args.seed,
        "cluster_seed": (SEED if args.cluster_seed is None
                         else args.cluster_seed)
    })
    if args.cluster_models is not None:
        cluster_args.update({"model_clusters": True})
    if args.cluster_k:
        cluster_args.update({"k": args.cluster_k})
    if cluster_fields and fields is not None:
        input_fields = configure_input_fields(fields, cluster_fields)
        cluster_args.update(input_fields=input_fields)
    if args.summary_fields is not None:
        cluster_args.update({"summary_fields": args.summary_fields_})
    cluster_args = update_sample_parameters_args(cluster_args, args)
    # user-provided JSON is applied last, overriding anything set above
    if 'cluster' in args.json_args:
        update_json_args(cluster_args, args.json_args.get('cluster'), fields)
    return cluster_args
def create_clusters(datasets, cluster_ids, cluster_args,
                    args, api=None, path=None,
                    session_file=None, log=None):
    """Create remote clusters

    ``cluster_ids`` may already hold ids created in a previous run
    (resume); only the remaining datasets are used then.
    """
    if api is None:
        api = bigml.api.BigML()
    clusters = cluster_ids[:]
    existing_clusters = len(clusters)
    cluster_args_list = []
    datasets = datasets[existing_clusters:]
    # if resuming and all clusters were created, there will be no datasets left
    if datasets:
        if isinstance(cluster_args, list):
            cluster_args_list = cluster_args
        # Only one cluster per command, at present
        number_of_clusters = 1
        message = dated("Creating %s.\n" %
                        plural("cluster", number_of_clusters))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_clusters):
            # throttle creation to args.max_parallel_clusters at a time
            wait_for_available_tasks(inprogress, args.max_parallel_clusters,
                                     api, "cluster")
            if cluster_args_list:
                cluster_args = cluster_args_list[i]
            cluster = api.create_cluster(datasets, cluster_args, retries=None)
            cluster_id = check_resource_error(cluster,
                                              "Failed to create cluster: ")
            log_message("%s\n" % cluster_id, log_file=log)
            cluster_ids.append(cluster_id)
            inprogress.append(cluster_id)
            clusters.append(cluster)
            log_created_resources("clusters", path, cluster_id, mode='a')
        if args.verbosity:
            # wait for the cluster to finish before reporting
            if bigml.api.get_status(cluster)['code'] != bigml.api.FINISHED:
                try:
                    cluster = check_resource(cluster, api.get_cluster,
                                             query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished cluster: %s" %
                             str(exception))
                clusters[0] = cluster
            message = dated("Cluster created: %s\n" %
                            get_url(cluster))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, cluster)
    return clusters, cluster_ids
def get_clusters(cluster_ids, args, api=None, session_file=None):
"""Retrieves remote clusters in its actual status
"""
if api is None:
api = bigml.api.BigML()
cluster_id = ""
clusters = cluster_ids
cluster_id = cluster_ids[0]
message = dated("Retrieving %s. %s\n" %
(plural("cluster", len(cluster_ids)),
get_url(cluster_id)))
log_message(message, log_file=session_file, console=args.verbosity)
# only one cluster to predict at present
try:
# we need the whole fields structure when exporting fields
query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
cluster = check_resource(cluster_ids[0], api.get_cluster,
query_string=query_string)
except ValueError, exception:
sys.exit("Failed to get a finished cluster: %s" % str(exception))
clusters[0] = cluster
return clusters, cluster_ids
def set_batch_centroid_args(args, fields=None,
                            dataset_fields=None):
    """Return batch centroid args dict

    ``args``: parsed bigmler flags
    ``fields``: Fields object of the cluster
    ``dataset_fields``: Fields object of the test dataset (defaults to
                        ``fields``)
    """
    batch_centroid_args = set_basic_batch_args(args, args.name)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_centroid_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})
    if args.prediction_info == FULL_FORMAT:
        batch_centroid_args.update(all_fields=True)
    if args.prediction_fields:
        batch_centroid_args.update(all_fields=False)
        prediction_fields = []
        for field in args.prediction_fields.split(args.args_separator):
            field = field.strip()
            # accept field names as well as ids
            if not field in dataset_fields.fields:
                try:
                    field = dataset_fields.field_id(field)
                except ValueError, exc:
                    sys.exit(exc)
            prediction_fields.append(field)
        batch_centroid_args.update(output_fields=prediction_fields)
    # user-provided JSON is applied last, overriding anything set above
    if 'batch_centroid' in args.json_args:
        update_json_args(
            batch_centroid_args, args.json_args.get('batch_centroid'), fields)
    return batch_centroid_args
def create_batch_centroid(cluster, test_dataset,
                          batch_centroid_args, args,
                          api=None, session_file=None,
                          path=None, log=None):
    """Creates a remote batch centroid and waits until it finishes.

    Returns the finished batch centroid resource; exits the program if
    the creation fails or the resource never reaches a finished state.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch centroid.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_centroid = api.create_batch_centroid(cluster,
                                               test_dataset,
                                               batch_centroid_args,
                                               retries=None)
    # record the created id so that the command can be resumed
    log_created_resources("batch_centroid", path,
                          bigml.api.get_batch_centroid_id(batch_centroid),
                          mode='a')
    # fix: the message used to say "batch prediction" (copy-paste error)
    batch_centroid_id = check_resource_error(
        batch_centroid, "Failed to create batch centroid: ")
    try:
        batch_centroid = check_resource(batch_centroid,
                                        api.get_batch_centroid)
    except ValueError as exception:
        sys.exit("Failed to get a finished batch centroid: %s"
                 % str(exception))
    message = dated("Batch centroid created: %s\n"
                    % get_url(batch_centroid))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_centroid_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_centroid)
    return batch_centroid
def set_publish_cluster_args(args):
    """Build the args dict used to publish a cluster.

    Returns an empty dict when no publishing option is set.
    """
    public_cluster = {"private": False} if args.public_cluster else {}
    if args.model_price:
        public_cluster["price"] = args.model_price
    if args.cpp:
        public_cluster["credits_per_prediction"] = args.cpp
    return public_cluster
def update_cluster(cluster, cluster_args, args,
                   api=None, path=None, session_file=None):
    """Pushes ``cluster_args`` to the remote cluster and re-fetches it.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating cluster. %s\n" % get_url(cluster)),
                log_file=session_file, console=args.verbosity)
    cluster = api.update_cluster(cluster, cluster_args)
    check_resource_error(cluster, "Failed to update cluster: %s"
                         % cluster['resource'])
    cluster = check_resource(cluster, api.get_cluster, query_string=FIELDS_QS)
    if is_shared(cluster):
        log_message(dated("Shared cluster link. %s\n" %
                          get_url(cluster, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, cluster)
    return cluster
def set_batch_anomaly_score_args(args, fields=None,
                                 dataset_fields=None):
    """Build the arguments dict for a batch anomaly score creation.
    """
    batch_anomaly_score_args = set_basic_batch_args(args, args.name)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_anomaly_score_args["fields_map"] = map_fields(
            args.fields_map_, fields, dataset_fields)
    if args.prediction_info == FULL_FORMAT:
        batch_anomaly_score_args["all_fields"] = True
    if args.prediction_fields:
        batch_anomaly_score_args["all_fields"] = False
        prediction_fields = []
        # fields may be given by name or by id; resolve names to ids
        for name in args.prediction_fields.split(args.args_separator):
            name = name.strip()
            if name not in dataset_fields.fields:
                try:
                    name = dataset_fields.field_id(name)
                except ValueError as exc:
                    sys.exit(exc)
            prediction_fields.append(name)
        batch_anomaly_score_args["output_fields"] = prediction_fields
    if 'batch_anomaly_score' in args.json_args:
        update_json_args(
            batch_anomaly_score_args,
            args.json_args.get('batch_anomaly_score'),
            fields)
    return batch_anomaly_score_args
def set_anomaly_args(args, name=None, fields=None, anomaly_fields=None):
    """Build the arguments dict for an anomaly detector.
    """
    if name is None:
        name = args.name
    if anomaly_fields is None:
        anomaly_fields = args.anomaly_fields_
    anomaly_args = set_basic_model_args(args, name)
    # fall back to the fixed SEED when no explicit seeds are given
    anomaly_args["seed"] = args.seed if args.seed is not None else SEED
    anomaly_args["anomaly_seed"] = (args.anomaly_seed
                                    if args.anomaly_seed is not None else SEED)
    if anomaly_fields and fields is not None:
        anomaly_args["input_fields"] = configure_input_fields(
            fields, anomaly_fields)
    if args.top_n > 0:
        anomaly_args["top_n"] = args.top_n
    if args.forest_size > 0:
        anomaly_args["forest_size"] = args.forest_size
    anomaly_args = update_sample_parameters_args(anomaly_args, args)
    if 'anomaly' in args.json_args:
        update_json_args(anomaly_args, args.json_args.get('anomaly'), fields)
    return anomaly_args
def set_publish_anomaly_args(args):
    """Build the args dict used to publish an anomaly detector.

    Returns an empty dict when no publishing option is set.
    """
    public_anomaly = {"private": False} if args.public_anomaly else {}
    if args.model_price:
        public_anomaly["price"] = args.model_price
    if args.cpp:
        public_anomaly["credits_per_prediction"] = args.cpp
    return public_anomaly
def create_anomalies(datasets, anomaly_ids, anomaly_args,
                     args, api=None, path=None,
                     session_file=None, log=None):
    """Create remote anomalies

    Supports resuming: ``anomaly_ids`` holds the ids already created in a
    previous run, and the corresponding prefix of ``datasets`` is skipped.
    Mutates ``anomaly_ids`` in place (new ids are appended) and returns
    ``(anomalies, anomaly_ids)``.
    """
    if api is None:
        api = bigml.api.BigML()
    anomalies = anomaly_ids[:]
    existing_anomalies = len(anomalies)
    anomaly_args_list = []
    datasets = datasets[existing_anomalies:]
    # if resuming and all anomalies were created,
    # there will be no datasets left
    if datasets:
        # anomaly_args may be a single dict or a per-anomaly list of dicts
        if isinstance(anomaly_args, list):
            anomaly_args_list = anomaly_args
        # Only one anomaly per command, at present
        number_of_anomalies = 1
        message = dated("Creating %s.\n" %
                        plural("anomaly detector", number_of_anomalies))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_anomalies):
            # throttle creation to the allowed number of parallel tasks
            wait_for_available_tasks(inprogress, args.max_parallel_anomalies,
                                     api, "anomaly")
            if anomaly_args_list:
                anomaly_args = anomaly_args_list[i]
            # NOTE(review): the whole datasets list is passed here (unlike
            # create_samples, which uses datasets[i]) -- presumably a
            # multi-dataset anomaly; confirm this is intended
            anomaly = api.create_anomaly(datasets, anomaly_args, retries=None)
            anomaly_id = check_resource_error(anomaly,
                                              "Failed to create anomaly: ")
            log_message("%s\n" % anomaly_id, log_file=log)
            anomaly_ids.append(anomaly_id)
            inprogress.append(anomaly_id)
            anomalies.append(anomaly)
            log_created_resources("anomalies", path, anomaly_id, mode='a')
        if args.verbosity:
            # wait until the (single) anomaly is finished before reporting
            if bigml.api.get_status(anomaly)['code'] != bigml.api.FINISHED:
                try:
                    anomaly = api.check_resource(anomaly,
                                                 query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished anomaly: %s" %
                             str(exception))
                anomalies[0] = anomaly
            message = dated("Anomaly created: %s\n" %
                            get_url(anomaly))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, anomaly)
    return anomalies, anomaly_ids
def get_anomalies(anomaly_ids, args, api=None, session_file=None):
    """Fetch the remote anomaly detectors and refresh the first in place.

    Note: ``anomalies`` aliases ``anomaly_ids``, so position 0 of the
    caller's list ends up holding the full anomaly resource.
    """
    if api is None:
        api = bigml.api.BigML()
    anomalies = anomaly_ids
    message = dated("Retrieving %s. %s\n" %
                    (plural("anomaly detector", len(anomaly_ids)),
                     get_url(anomaly_ids[0])))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one anomaly detector is handled per command at present
    try:
        # the whole fields structure is needed when exporting fields
        query_string = ALL_FIELDS_QS if args.export_fields else FIELDS_QS
        anomaly = api.check_resource(anomaly_ids[0],
                                     query_string=query_string)
    except ValueError as exception:
        sys.exit("Failed to get a finished anomaly: %s" % str(exception))
    anomalies[0] = anomaly
    return anomalies, anomaly_ids
def create_batch_anomaly_score(anomaly, test_dataset,
                               batch_anomaly_score_args, args,
                               api=None, session_file=None,
                               path=None, log=None):
    """Creates a remote batch anomaly score and waits until it finishes.

    Returns the finished batch anomaly score resource; exits the program
    if creation fails or it never reaches a finished state.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch anomaly score.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_anomaly_score = api.create_batch_anomaly_score(
        anomaly, test_dataset, batch_anomaly_score_args, retries=None)
    # record the created id so that the command can be resumed
    log_created_resources(
        "batch_anomaly_score", path,
        bigml.api.get_batch_anomaly_score_id(batch_anomaly_score),
        mode='a')
    # fix: the message used to say "batch prediction" (copy-paste error)
    batch_anomaly_score_id = check_resource_error(
        batch_anomaly_score, "Failed to create batch anomaly score: ")
    try:
        batch_anomaly_score = api.check_resource(batch_anomaly_score)
    except ValueError as exception:
        sys.exit("Failed to get a finished batch anomaly score: %s"
                 % str(exception))
    message = dated("Batch anomaly score created: %s\n"
                    % get_url(batch_anomaly_score))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_anomaly_score_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_anomaly_score)
    return batch_anomaly_score
def update_anomaly(anomaly, anomaly_args, args,
                   api=None, path=None, session_file=None):
    """Pushes ``anomaly_args`` to the remote anomaly detector and re-fetches it.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating anomaly detector. %s\n" % get_url(anomaly)),
                log_file=session_file, console=args.verbosity)
    anomaly = api.update_anomaly(anomaly, anomaly_args)
    check_resource_error(anomaly, "Failed to update anomaly: %s"
                         % anomaly['resource'])
    anomaly = api.check_resource(anomaly, query_string=FIELDS_QS)
    if is_shared(anomaly):
        log_message(dated("Shared anomaly link. %s\n" %
                          get_url(anomaly, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, anomaly)
    return anomaly
def set_project_args(args, name=None):
    """Build the arguments dict for a project.
    """
    project_args = set_basic_args(args, args.name if name is None else name)
    if 'project' in args.json_args:
        # projects have no fields structure, hence the explicit None
        update_json_args(project_args, args.json_args.get('project'), None)
    return project_args
def create_project(project_args, args, api=None,
                   session_file=None, path=None, log=None):
    """Creates a remote project and waits until it is finished.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Creating project.\n"),
                log_file=session_file, console=args.verbosity)
    project = api.create_project(project_args)
    log_created_resources("project", path,
                          bigml.api.get_project_id(project), mode='a')
    project_id = check_resource_error(project, "Failed to create project: ")
    try:
        project = check_resource(project, api=api)
    except ValueError as exception:
        sys.exit("Failed to get a finished project: %s" % str(exception))
    log_message(dated("Project \"%s\" has been created.\n" %
                      project['object']['name']),
                log_file=session_file, console=args.verbosity)
    log_message("%s\n" % project_id, log_file=log)
    # some subcommands build an args object without a reports attribute
    try:
        if args.reports:
            report(args.reports, path, project)
    except AttributeError:
        pass
    return project
def update_project(project_args, args,
                   api=None, session_file=None, log=None):
    """Updates the project referenced by ``args.project_id``.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating project attributes.\n"),
                log_file=session_file, console=args.verbosity)
    project = api.update_project(args.project_id, project_args)
    check_resource_error(project, "Failed to update project: %s"
                         % project['resource'])
    log_message(dated("Project \"%s\" has been updated.\n" %
                      project['resource']),
                log_file=session_file, console=args.verbosity)
    log_message("%s\n" % args.project_id, log_file=log)
    return project
def get_project_by_name(project, api=None, verbosity=True, session_file=None):
    """Looks up a project id by project name; returns None when not found.
    """
    if api is None:
        api = bigml.api.BigML()
    project_id = None
    needs_lookup = (isinstance(project, basestring) or
                    bigml.api.get_status(project)['code'] !=
                    bigml.api.FINISHED)
    if needs_lookup:
        log_message(dated("Retrieving project info.\n"),
                    log_file=session_file, console=verbosity)
        matches = api.list_projects(
            query_string="name=%s" % project).get('objects', [])
        if matches:
            project_id = matches[0]['resource']
    return project_id
def set_sample_args(args, name=None):
    """Build the arguments dict for a sample.
    """
    sample_args = set_basic_args(args, args.name if name is None else name)
    if 'sample' in args.json_args:
        # NOTE(review): no fields argument is passed here, unlike the
        # other set_*_args helpers -- confirm update_json_args defaults it
        update_json_args(sample_args, args.json_args.get('sample'))
    return sample_args
def create_samples(datasets, sample_ids, sample_args,
                   args, api=None, path=None,
                   session_file=None, log=None):
    """Create remote samples

    Supports resuming: ``sample_ids`` holds the ids already created in a
    previous run and the corresponding prefix of ``datasets`` is skipped.
    Mutates ``sample_ids`` in place (new ids are appended) and returns
    ``(samples, sample_ids)``.
    """
    if api is None:
        api = bigml.api.BigML()
    samples = sample_ids[:]
    existing_samples = len(samples)
    sample_args_list = []
    datasets = datasets[existing_samples:]
    # if resuming and all samples were created, there will be no datasets left
    if datasets:
        # sample_args may be a single dict or a per-sample list of dicts
        if isinstance(sample_args, list):
            sample_args_list = sample_args
        # Only one sample per command, at present
        number_of_samples = 1
        max_parallel_samples = 1
        message = dated("Creating %s.\n" %
                        plural("sample", number_of_samples))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_samples):
            # throttle creation to the allowed number of parallel tasks
            wait_for_available_tasks(inprogress, max_parallel_samples,
                                     api, "sample")
            if sample_args_list:
                sample_args = sample_args_list[i]
            sample = api.create_sample(datasets[i], sample_args, retries=None)
            sample_id = check_resource_error(sample,
                                             "Failed to create sample: ")
            log_message("%s\n" % sample_id, log_file=log)
            sample_ids.append(sample_id)
            inprogress.append(sample_id)
            samples.append(sample)
            log_created_resources("samples", path, sample_id, mode='a')
        if args.verbosity:
            # wait until the (single) sample is finished before reporting
            if bigml.api.get_status(sample)['code'] != bigml.api.FINISHED:
                try:
                    sample = check_resource(sample, api.get_sample)
                except ValueError, exception:
                    sys.exit("Failed to get a finished sample: %s" %
                             str(exception))
                samples[0] = sample
            message = dated("Sample created: %s\n" %
                            get_url(sample))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, sample)
    return samples, sample_ids
def update_sample(sample, sample_args, args,
                  api=None, path=None, session_file=None):
    """Pushes ``sample_args`` to the remote sample and re-fetches it.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating sample. %s\n" % get_url(sample)),
                log_file=session_file, console=args.verbosity)
    sample = api.update_sample(sample, sample_args)
    check_resource_error(sample, "Failed to update sample: %s"
                         % sample['resource'])
    sample = check_resource(sample, api.get_sample)
    if is_shared(sample):
        log_message(dated("Shared sample link. %s\n" %
                          get_url(sample, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, sample)
    return sample
def get_samples(sample_ids, args,
                api=None, session_file=None, query_string=''):
    """Retrieves remote samples in its actual status.

    Note: ``samples`` aliases ``sample_ids``, so position 0 of the
    caller's list ends up holding the full sample resource.
    """
    if api is None:
        api = bigml.api.BigML()
    samples = sample_ids
    message = dated("Retrieving %s. %s\n" %
                    (plural("sample", len(sample_ids)),
                     get_url(sample_ids[0])))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one sample to predict at present
    try:
        sample = api.get_sample(sample_ids[0],
                                query_string=query_string)
        # fix: this is a retrieval, not a creation; the message used to
        # wrongly say "Failed to create sample"
        check_resource_error(sample, "Failed to get sample: %s"
                             % sample['resource'])
        sample = check_resource(sample, api=api, query_string=query_string)
    except ValueError as exception:
        sys.exit("Failed to get a finished sample: %s" % str(exception))
    samples[0] = sample
    return samples, sample_ids
def set_publish_sample_args(args):
    """Build the args dict used to publish a sample.

    Returns an empty dict when the sample is not being made public.
    """
    return {"private": False} if args.public_sample else {}
def set_association_args(args, name=None, fields=None,
                         association_fields=None):
    """Build the arguments dict for an association resource.
    """
    if name is None:
        name = args.name
    if association_fields is None:
        association_fields = args.association_fields_
    association_args = set_basic_model_args(args, name)
    if association_fields and fields is not None:
        association_args["input_fields"] = configure_input_fields(
            fields, association_fields)
    if args.association_k:
        association_args["max_k"] = args.association_k
    if args.search_strategy:
        association_args["search_strategy"] = args.search_strategy
    association_args = update_sample_parameters_args(association_args, args)
    if 'association' in args.json_args:
        update_json_args(association_args,
                         args.json_args.get('association'), fields)
    return association_args
def create_associations(datasets, association_ids, association_args,
                        args, api=None, path=None,
                        session_file=None, log=None):
    """Create remote associations

    Supports resuming: ``association_ids`` holds the ids already created
    in a previous run and the corresponding prefix of ``datasets`` is
    skipped. Mutates ``association_ids`` in place (new ids are appended)
    and returns ``(associations, association_ids)``.
    """
    if api is None:
        api = bigml.api.BigML()
    associations = association_ids[:]
    existing_associations = len(associations)
    association_args_list = []
    datasets = datasets[existing_associations:]
    # if resuming and all associations were created,
    # there will be no datasets left
    if datasets:
        # association_args may be a single dict or a per-resource list
        if isinstance(association_args, list):
            association_args_list = association_args
        # Only one association per command, at present
        number_of_associations = 1
        message = dated("Creating %s.\n" %
                        plural("association", number_of_associations))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_associations):
            # throttle creation to the allowed number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_associations,
                                     api, "association")
            if association_args_list:
                association_args = association_args_list[i]
            # the whole datasets list is passed (multi-dataset creation)
            association = api.create_association(
                datasets, association_args, retries=None)
            association_id = check_resource_error( \
                association, "Failed to create association: ")
            log_message("%s\n" % association_id, log_file=log)
            association_ids.append(association_id)
            inprogress.append(association_id)
            associations.append(association)
            log_created_resources( \
                "associations", path, association_id, mode='a')
        if args.verbosity:
            # wait until the (single) association is finished before reporting
            if bigml.api.get_status(association)['code'] != bigml.api.FINISHED:
                try:
                    association = check_resource( \
                        association, api.get_association,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished association: %s" %
                             str(exception))
                associations[0] = association
            message = dated("Association created: %s\n" %
                            get_url(association))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, association)
    return associations, association_ids
def get_associations(association_ids, args, api=None, session_file=None):
    """Fetch the remote associations and refresh the first one in place.

    Note: ``associations`` aliases ``association_ids``, so position 0 of
    the caller's list ends up holding the full association resource.
    """
    if api is None:
        api = bigml.api.BigML()
    associations = association_ids
    message = dated("Retrieving %s. %s\n" %
                    (plural("association", len(association_ids)),
                     get_url(association_ids[0])))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one association is handled per command at present
    try:
        association = check_resource(association_ids[0], api.get_association,
                                     query_string=FIELDS_QS)
    except ValueError as exception:
        sys.exit("Failed to get a finished association: %s" % str(exception))
    associations[0] = association
    return associations, association_ids
def set_publish_association_args(args):
    """Build the args dict used to publish an association.

    Returns an empty dict when no publishing option is set.
    """
    public_association = {"private": False} if args.public_association else {}
    if args.model_price:
        public_association["price"] = args.model_price
    if args.cpp:
        public_association["credits_per_prediction"] = args.cpp
    return public_association
def update_association(association, association_args, args,
                       api=None, path=None, session_file=None):
    """Pushes ``association_args`` to the remote association and re-fetches it.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating association. %s\n" % get_url(association)),
                log_file=session_file, console=args.verbosity)
    association = api.update_association(association, association_args)
    check_resource_error(association, "Failed to update association: %s"
                         % association['resource'])
    association = check_resource(association,
                                 api.get_association, query_string=FIELDS_QS)
    if is_shared(association):
        log_message(dated("Shared association link. %s\n" %
                          get_url(association, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, association)
    return association
def set_script_args(args, name=None):
    """Build the arguments dict for a script.
    """
    if name is None:
        name = args.name
    script_args = set_basic_args(args, name)
    if args.project_id is not None:
        script_args["project"] = args.project_id
    # NOTE(review): the guard tests args.imports but the value comes from
    # args.imports_ -- confirm both attributes are always set together
    if args.imports is not None:
        script_args["imports"] = args.imports_
    if args.parameters_ is not None:
        script_args["inputs"] = args.parameters_
    if args.declare_outputs_:
        script_args["outputs"] = args.declare_outputs_
    update_attributes(script_args, args.json_args.get('script'))
    return script_args
def create_script(source_code, script_args, args, api=None, path=None,
                  session_file=None, log=None):
    """Creates a remote script and waits until it compiles.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Creating script \"%s\".\n" % script_args["name"]),
                log_file=session_file, console=args.verbosity)
    script = api.create_script(source_code, script_args)
    log_created_resources("scripts", path,
                          bigml.api.get_script_id(script), mode='a')
    script_id = check_resource_error(script, "Failed to create script: ")
    try:
        script = check_resource(script, api.get_script)
    except ValueError as exception:
        sys.exit("Failed to get a compiled script: %s" % str(exception))
    log_message(dated("Script created: %s\n" % get_url(script)),
                log_file=session_file, console=args.verbosity)
    log_message("%s\n" % script_id, log_file=log)
    return script
def get_script(script, api=None, verbosity=True,
               session_file=None):
    """Retrieves the script, refreshing it when not yet finished.
    """
    if api is None:
        api = bigml.api.BigML()
    needs_refresh = (isinstance(script, basestring) or
                     bigml.api.get_status(script)['code'] !=
                     bigml.api.FINISHED)
    if needs_refresh:
        log_message(dated("Retrieving script. %s\n" % get_url(script)),
                    log_file=session_file, console=verbosity)
        try:
            script = check_resource(script, api.get_script)
        except ValueError as exception:
            sys.exit("Failed to get a compiled script: %s" % str(exception))
    return script
def set_execution_args(args, name=None):
    """Returns an execution arguments dict.
    """
    if name is None:
        name = args.name
    execution_args = set_basic_args(args, name)
    if args.project_id is not None:
        execution_args.update({"project": args.project_id})
    if args.arguments_:
        execution_args.update({"inputs": args.arguments_})
    if args.creation_defaults is not None:
        execution_args.update({"creation_defaults": args.creation_defaults_})
    if args.outputs_:
        execution_args.update({"outputs": args.outputs_})
    if args.input_maps_:
        # fix: every other key here maps args.X_ to "X" ("inputs",
        # "outputs", "creation_defaults"); "input_maps_" leaked the
        # parsed-attribute trailing underscore into the API payload
        execution_args.update({"input_maps": args.input_maps_})
    update_attributes(execution_args, args.json_args.get('execution'))
    return execution_args
def create_execution(execution_args, args, api=None, path=None,
                     session_file=None, log=None):
    """Creates a remote execution of one script or a list of scripts.

    Returns the finished execution resource; exits the program if the
    creation fails or the execution never finishes.
    """
    # fix: every sibling create_* helper defaults api when None; this one
    # used to dereference a None api and crash
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating execution.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    scripts = args.script_ids if args.script_ids else args.script
    execution = api.create_execution(scripts, execution_args)
    # record the created id so that the command can be resumed
    log_created_resources("execution", path,
                          bigml.api.get_execution_id(execution), mode='a')
    execution_id = check_resource_error(execution,
                                        "Failed to create execution: ")
    try:
        execution = check_resource(execution, api.get_execution)
    except ValueError as exception:
        sys.exit("Failed to get a finished execution: %s" % str(exception))
    message = dated("Execution created: %s\n" % get_url(execution))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % execution_id, log_file=log)
    return execution
def get_execution(execution, api=None, verbosity=True,
                  session_file=None):
    """Retrieves the execution, refreshing it when not yet finished.
    """
    if api is None:
        api = bigml.api.BigML()
    needs_refresh = (isinstance(execution, basestring) or
                     bigml.api.get_status(execution)['code'] !=
                     bigml.api.FINISHED)
    if needs_refresh:
        log_message(dated("Retrieving execution. %s\n" % get_url(execution)),
                    log_file=session_file, console=verbosity)
        try:
            execution = check_resource(execution, api.get_execution)
        except ValueError as exception:
            sys.exit("Failed to get a finished execution: %s" %
                     str(exception))
    return execution
def set_logistic_regression_args(args, name=None, fields=None,
                                 objective_id=None,
                                 logistic_regression_fields=None):
    """Return logistic regression arguments dict

    WARNING: mutates ``args`` in place (``sample_rate`` and
    ``replacement``) when evaluating or cross-validating.
    """
    if name is None:
        name = args.name
    if logistic_regression_fields is None:
        logistic_regression_fields = args.logistic_regression_fields_
    if objective_id is None:
        objective_id = args.objective_id_
    logistic_regression_args = set_basic_model_args(args, name)
    logistic_regression_args.update({
        "seed": SEED if args.seed is None else args.seed
    })
    if objective_id is not None and fields is not None:
        logistic_regression_args.update({"objective_field": objective_id})
    if logistic_regression_fields and fields is not None:
        input_fields = configure_input_fields(fields,
                                              logistic_regression_fields)
        logistic_regression_args.update(input_fields=input_fields)
    # force a deterministic seed and adjust sampling when the model is
    # being evaluated without an explicit test split / test datasets
    if ((args.evaluate and args.test_split == 0 and args.test_datasets is None)
            or args.cross_validation_rate > 0):
        logistic_regression_args.update(seed=SEED)
        if args.cross_validation_rate > 0:
            # hold out the cross-validation fraction
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
              and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE
    logistic_regression_args.update({"sample_rate": args.sample_rate})
    if args.lr_c:
        logistic_regression_args.update({"c": args.lr_c})
    logistic_regression_args.update({"bias": args.bias})
    logistic_regression_args.update( \
        {"balance_fields": args.balance_fields})
    if args.eps:
        logistic_regression_args.update({"eps": args.eps})
    if args.normalize is not None:
        logistic_regression_args.update({"normalize": args.normalize})
    if args.missing_numerics is not None:
        logistic_regression_args.update( \
            {"missing_numerics": args.missing_numerics})
    if args.field_codings is not None:
        logistic_regression_args.update(\
            {"field_codings": args.field_codings_})
    logistic_regression_args = update_sample_parameters_args( \
        logistic_regression_args, args)
    # user-provided JSON overrides are applied last
    if 'logistic_regression' in args.json_args:
        update_json_args(logistic_regression_args,
                         args.json_args.get('logistic_regression'),
                         fields)
    return logistic_regression_args
def create_logistic_regressions(datasets, logistic_regression_ids,
                                logistic_regression_args,
                                args, api=None, path=None,
                                session_file=None, log=None):
    """Create remote logistic regressions

    Supports resuming: ``logistic_regression_ids`` holds ids already
    created in a previous run and the corresponding prefix of
    ``datasets`` is skipped. Mutates ``logistic_regression_ids`` in
    place (new ids are appended) and returns
    ``(logistic_regressions, logistic_regression_ids)``.
    """
    if api is None:
        api = bigml.api.BigML()
    logistic_regressions = logistic_regression_ids[:]
    existing_logistic_regressions = len(logistic_regressions)
    logistic_regression_args_list = []
    datasets = datasets[existing_logistic_regressions:]
    # if resuming and all logistic regressions were created,
    # there will be no datasets left
    if datasets:
        # args may be a single dict or a per-model list of dicts
        if isinstance(logistic_regression_args, list):
            logistic_regression_args_list = logistic_regression_args
        # Only one logistic regression per command, at present
        number_of_logistic_regressions = 1
        message = dated("Creating %s.\n" %
                        plural("logistic regression",
                               number_of_logistic_regressions))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_logistic_regressions):
            # throttle creation to the allowed number of parallel tasks
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_logistic_regressions,
                                     api, "logisticregression")
            if logistic_regression_args_list:
                logistic_regression_args = logistic_regression_args_list[i]
            if args.cross_validation_rate > 0:
                # a different seed per cross-validation round
                new_seed = get_basic_seed(i + existing_logistic_regressions)
                logistic_regression_args.update(seed=new_seed)
            if (args.test_datasets and args.evaluate):
                # one model per train dataset when evaluating test datasets
                dataset = datasets[i]
                logistic_regression = api.create_logistic_regression( \
                    dataset, logistic_regression_args, retries=None)
            elif args.dataset_off and args.evaluate:
                # leave the i-th dataset out for evaluation
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_logistic_regressions]
                logistic_regression = api.create_logistic_regression( \
                    multi_dataset, logistic_regression_args, retries=None)
            else:
                logistic_regression = api.create_logistic_regression( \
                    datasets, logistic_regression_args, retries=None)
            logistic_regression_id = check_resource_error( \
                logistic_regression, "Failed to create logistic regression: ")
            log_message("%s\n" % logistic_regression_id, log_file=log)
            logistic_regression_ids.append(logistic_regression_id)
            inprogress.append(logistic_regression_id)
            logistic_regressions.append(logistic_regression)
            log_created_resources("logistic_regressions",
                                  path,
                                  logistic_regression_id, mode='a')
        if args.verbosity:
            # wait until the (single) model is finished before reporting
            if bigml.api.get_status(logistic_regression)['code'] != \
                    bigml.api.FINISHED:
                try:
                    logistic_regression = check_resource( \
                        logistic_regression, api.get_logistic_regression,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished logistic regression:"
                             " %s" %
                             str(exception))
                logistic_regressions[0] = logistic_regression
            message = dated("Logistic regression created: %s\n" %
                            get_url(logistic_regression))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, logistic_regression)
    return logistic_regressions, logistic_regression_ids
def get_logistic_regressions(logistic_regression_ids,
                             args, api=None, session_file=None):
    """Fetch the remote logistic regressions, refreshing the first in place.

    Note: ``logistic_regressions`` aliases ``logistic_regression_ids``,
    so position 0 of the caller's list ends up holding the full resource.
    """
    if api is None:
        api = bigml.api.BigML()
    logistic_regressions = logistic_regression_ids
    message = dated("Retrieving %s. %s\n" %
                    (plural("logistic regression",
                            len(logistic_regression_ids)),
                     get_url(logistic_regression_ids[0])))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one logistic regression is handled per command at present
    try:
        # the whole fields structure is needed when exporting fields
        query_string = ALL_FIELDS_QS if args.export_fields else FIELDS_QS
        logistic_regression = check_resource(logistic_regression_ids[0],
                                             api.get_logistic_regression,
                                             query_string=query_string)
    except ValueError as exception:
        sys.exit("Failed to get a finished logistic regression: %s" %
                 str(exception))
    logistic_regressions[0] = logistic_regression
    return logistic_regressions, logistic_regression_ids
def set_publish_logistic_regression_args(args):
    """Build the args dict used to publish a logistic regression.

    Returns an empty dict when no publishing option is set.
    """
    public_logistic_regression = ({"private": False}
                                  if args.public_logistic_regression else {})
    if args.model_price:
        public_logistic_regression["price"] = args.model_price
    if args.cpp:
        public_logistic_regression["credits_per_prediction"] = args.cpp
    return public_logistic_regression
def update_logistic_regression(logistic_regression, logistic_regression_args,
                               args, api=None, path=None, session_file=None):
    """Pushes the given args to the remote logistic regression, re-fetching it.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating logistic regression. %s\n" %
                      get_url(logistic_regression)),
                log_file=session_file, console=args.verbosity)
    logistic_regression = api.update_logistic_regression(
        logistic_regression, logistic_regression_args)
    check_resource_error(logistic_regression,
                         "Failed to update logistic regression: %s"
                         % logistic_regression['resource'])
    logistic_regression = check_resource(logistic_regression,
                                         api.get_logistic_regression,
                                         query_string=FIELDS_QS)
    if is_shared(logistic_regression):
        log_message(dated("Shared logistic regression link. %s\n" %
                          get_url(logistic_regression, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, logistic_regression)
    return logistic_regression
def set_linear_regression_args(args, name=None, fields=None,
                               objective_id=None,
                               linear_regression_fields=None):
    """Return linear regression arguments dict

    WARNING: mutates ``args`` in place (``sample_rate`` and
    ``replacement``) when evaluating or cross-validating.
    """
    if name is None:
        name = args.name
    if linear_regression_fields is None:
        linear_regression_fields = args.linear_regression_fields_
    if objective_id is None:
        objective_id = args.objective_id_
    linear_regression_args = set_basic_model_args(args, name)
    linear_regression_args.update({
        "seed": SEED if args.seed is None else args.seed
    })
    if objective_id is not None and fields is not None:
        linear_regression_args.update({"objective_field": objective_id})
    if linear_regression_fields and fields is not None:
        input_fields = configure_input_fields(fields, linear_regression_fields)
        linear_regression_args.update(input_fields=input_fields)
    # force a deterministic seed and adjust sampling when the model is
    # being evaluated without an explicit test split / test datasets
    if ((args.evaluate and args.test_split == 0 and args.test_datasets is None)
            or args.cross_validation_rate > 0):
        linear_regression_args.update(seed=SEED)
        if args.cross_validation_rate > 0:
            # hold out the cross-validation fraction
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
              and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE
    linear_regression_args.update({"sample_rate": args.sample_rate})
    linear_regression_args.update({"bias": args.bias})
    if args.field_codings is not None:
        linear_regression_args.update(\
            {"field_codings": args.field_codings_})
    linear_regression_args = update_sample_parameters_args( \
        linear_regression_args, args)
    # user-provided JSON overrides are applied last
    if 'linear_regression' in args.json_args:
        update_json_args(linear_regression_args,
                         args.json_args.get('linear_regression'),
                         fields)
    return linear_regression_args
def create_linear_regressions(datasets, linear_regression_ids,
                              linear_regression_args,
                              args, api=None, path=None,
                              session_file=None, log=None):
    """Create remote linear regressions.

    Resumes from `linear_regression_ids` when some resources were
    already created: only the remaining `datasets` are used. Returns
    the list of resources and the list of their ids.
    """
    if api is None:
        api = bigml.api.BigML()
    linear_regressions = linear_regression_ids[:]
    existing_linear_regressions = len(linear_regressions)
    linear_regression_args_list = []
    datasets = datasets[existing_linear_regressions:]
    # if resuming and all linear regressions were created,
    # there will be no datasets left
    if datasets:
        if isinstance(linear_regression_args, list):
            linear_regression_args_list = linear_regression_args

        # Only one linear regression per command, at present
        number_of_linear_regressions = 1
        message = dated("Creating %s.\n" %
                        plural("linear regression",
                               number_of_linear_regressions))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_linear_regressions):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_linear_regressions,
                                     api, "linearregression")
            if linear_regression_args_list:
                linear_regression_args = linear_regression_args_list[i]
            if args.cross_validation_rate > 0:
                # each cross-validation model gets its own deterministic seed
                new_seed = get_basic_seed(i + existing_linear_regressions)
                linear_regression_args.update(seed=new_seed)

            if (args.test_datasets and args.evaluate):
                dataset = datasets[i]
                linear_regression = api.create_linear_regression( \
                    dataset, linear_regression_args, retries=None)
            elif args.dataset_off and args.evaluate:
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_linear_regressions]
                linear_regression = api.create_linear_regression( \
                    multi_dataset, linear_regression_args, retries=None)
            else:
                linear_regression = api.create_linear_regression( \
                    datasets, linear_regression_args, retries=None)
            linear_regression_id = check_resource_error( \
                linear_regression, "Failed to create linear regression: ")
            log_message("%s\n" % linear_regression_id, log_file=log)
            linear_regression_ids.append(linear_regression_id)
            inprogress.append(linear_regression_id)
            linear_regressions.append(linear_regression)
            log_created_resources("linear_regressions",
                                  path,
                                  linear_regression_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(linear_regression)['code'] != \
                    bigml.api.FINISHED:
                try:
                    linear_regression = check_resource( \
                        linear_regression, api.get_linear_regression,
                        query_string=query_string)
                # fix: "except ValueError, exception" is Python 2-only
                # syntax; the "as" form works on Python 2.6+ and 3
                except ValueError as exception:
                    sys.exit("Failed to get a finished linear regression:"
                             " %s" %
                             str(exception))
                linear_regressions[0] = linear_regression
            message = dated("linear regression created: %s\n" %
                            get_url(linear_regression))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, linear_regression)

    return linear_regressions, linear_regression_ids
def get_linear_regressions(linear_regression_ids,
                           args, api=None, session_file=None):
    """Retrieves remote linear regression in its actual status.

    Only the first id is fetched (a single linear regression is used
    for predictions at present); the finished resource replaces it in
    the returned list.
    """
    if api is None:
        api = bigml.api.BigML()
    linear_regressions = linear_regression_ids
    linear_regression_id = linear_regression_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("linear regression", len(linear_regression_ids)),
                     get_url(linear_regression_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one linear regression to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        linear_regression = check_resource(linear_regression_ids[0],
                                           api.get_linear_regression,
                                           query_string=query_string)
    # fix: Python 2-only "except ValueError, exception" -> "as" form
    except ValueError as exception:
        sys.exit("Failed to get a finished linear regression: %s" % \
                 str(exception))
    linear_regressions[0] = linear_regression
    return linear_regressions, linear_regression_ids
def set_publish_linear_regression_args(args):
    """Build the args dict used to publish a linear regression.

    Returns an empty dict unless publishing was requested.
    """
    public_linear_regression = {}
    if args.public_linear_regression:
        public_linear_regression.update(private=False)
        if args.model_price:
            public_linear_regression.update(price=args.model_price)
        if args.cpp:
            public_linear_regression.update(
                credits_per_prediction=args.cpp)
    return public_linear_regression
def update_linear_regression(linear_regression, linear_regression_args,
                             args, api=None, path=None, session_file=None):
    """Update a linear regression's properties and wait for completion.

    Logs the update, checks the returned resource for errors and
    re-fetches it until it is finished.
    """
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating linear regression. %s\n" %
                      get_url(linear_regression)),
                log_file=session_file, console=args.verbosity)
    linear_regression = api.update_linear_regression(
        linear_regression, linear_regression_args)
    check_resource_error(linear_regression,
                         "Failed to update linear regression: %s"
                         % linear_regression['resource'])
    linear_regression = check_resource(linear_regression,
                                       api.get_linear_regression,
                                       query_string=FIELDS_QS)
    if is_shared(linear_regression):
        log_message(dated("Shared linear regression link. %s\n" %
                          get_url(linear_regression, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, linear_regression)
    return linear_regression
def set_time_series_args(args, name=None, fields=None,
                         objective_id=None,
                         time_series_fields=None):
    """Return time-series arguments dict.

    Values not passed explicitly default to the ones stored in `args`.
    """
    if name is None:
        name = args.name
    if objective_id is None:
        objective_id = args.objective_id_
    time_series_args = set_basic_model_args(args, name)
    time_series_args.update({
        "all_numeric_objectives": args.all_numeric_objectives,
        "period": args.period
    })

    # if we need to evaluate and there's no previous split, use a range
    if args.evaluate and args.test_split == 0 and not args.has_test_datasets_:
        args.range_ = [1, int(args.max_rows * EVALUATE_SAMPLE_RATE)]
    if objective_id is not None:
        time_series_args.update({"objective_field": objective_id})
    if args.objectives:
        time_series_args.update({"objective_fields": args.objective_fields_})
    if args.damped_trend is not None:
        time_series_args.update({"damped_trend": args.damped_trend})
    if args.error is not None:
        time_series_args.update({"error": args.error})
    if args.field_parameters:
        time_series_args.update({"field_parameters": args.field_parameters_})
    if args.range_:
        time_series_args.update({"range": args.range_})
    if args.seasonality is not None:
        time_series_args.update({"seasonality": args.seasonality})
    if args.trend is not None:
        time_series_args.update({"trend": args.trend})
    if args.time_start or args.time_end or args.time_interval or \
            args.time_interval_unit:
        time_range = {}
        if args.time_start:
            time_range.update({"start": args.time_start})
        if args.time_end:
            time_range.update({"end": args.time_end})
        if args.time_interval:
            time_range.update({"interval": args.time_interval})
        if args.time_interval_unit:
            time_range.update({"interval_unit": args.time_interval_unit})
        # bug fix: this used to call `time_series.update(...)`, but no
        # `time_series` name exists in this scope, so any time-range
        # option raised NameError; the range belongs in the args dict
        time_series_args.update({"time_range": time_range})
    if 'time_series' in args.json_args:
        update_json_args(time_series_args,
                         args.json_args.get('time_series'),
                         fields)
    return time_series_args
def create_time_series(datasets, time_series_ids,
                       time_series_args,
                       args, api=None, path=None,
                       session_file=None, log=None):
    """Create remote time-series.

    Resumes from the ids in `time_series_ids`: only the remaining
    datasets are used (one time-series per command, at present).
    """
    if api is None:
        api = bigml.api.BigML()
    time_series_set = time_series_ids[:]
    existing_time_series = len(time_series_set)
    time_series_args_list = []
    datasets = datasets[existing_time_series:]
    # if resuming and all time-series were created,
    # there will be no datasets left
    if datasets:
        if isinstance(time_series_args, list):
            time_series_args_list = time_series_args

        # Only one time-series per command, at present
        number_of_time_series = 1
        message = dated("Creating %s time-series.\n" %
                        number_of_time_series)
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_time_series):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_time_series,
                                     api, "timeseries")
            if time_series_args_list:
                time_series_args = time_series_args_list[i]

            if (args.test_datasets and args.evaluate):
                dataset = datasets[i]
                time_series = api.create_time_series( \
                    dataset, time_series_args, retries=None)
            else:
                time_series = api.create_time_series( \
                    datasets, time_series_args, retries=None)
            time_series_id = check_resource_error( \
                time_series, "Failed to create time-series: ")
            log_message("%s\n" % time_series_id, log_file=log)
            time_series_ids.append(time_series_id)
            inprogress.append(time_series_id)
            time_series_set.append(time_series)
            log_created_resources("time_series",
                                  path,
                                  time_series_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(time_series)['code'] != \
                    bigml.api.FINISHED:
                try:
                    time_series = check_resource( \
                        time_series, api.get_time_series,
                        query_string=query_string)
                # fix: Python 2-only "except ValueError, exception"
                except ValueError as exception:
                    sys.exit("Failed to get a finished time-series:"
                             " %s" %
                             str(exception))
                time_series_set[0] = time_series
            message = dated("Time-series created: %s\n" %
                            get_url(time_series))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, time_series)

    return time_series_set, time_series_ids
def get_time_series(time_series_ids,
                    args, api=None, session_file=None):
    """Retrieves remote time-series in its actual status.

    Only the first id is fetched (one time-series per command at
    present); the finished resource replaces it in the returned list.
    """
    if api is None:
        api = bigml.api.BigML()
    time_series_set = time_series_ids
    time_series_id = time_series_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("time-series", len(time_series_ids)),
                     get_url(time_series_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one time-series to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        time_series = check_resource(time_series_ids[0],
                                     api.get_time_series,
                                     query_string=query_string)
    # fix: Python 2-only "except ValueError, exception" -> "as" form
    except ValueError as exception:
        sys.exit("Failed to get a finished time-series: %s" % \
                 str(exception))
    time_series_set[0] = time_series
    return time_series_set, time_series_ids
def set_publish_time_series_args(args):
    """Build the args dict used to publish a time-series.

    Returns an empty dict unless publishing was requested.
    """
    public_time_series = {}
    if args.public_time_series:
        public_time_series.update(private=False)
        if args.model_price:
            public_time_series.update(price=args.model_price)
        if args.cpp:
            public_time_series.update(credits_per_prediction=args.cpp)
    return public_time_series
def update_time_series(time_series, time_series_args,
                       args, api=None, path=None, session_file=None):
    """Update a time-series' properties and wait for completion."""
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating time-series. %s\n" % get_url(time_series)),
                log_file=session_file, console=args.verbosity)
    time_series = api.update_time_series(time_series, time_series_args)
    check_resource_error(time_series,
                         "Failed to update time-series: %s"
                         % time_series['resource'])
    time_series = check_resource(time_series, api.get_time_series,
                                 query_string=FIELDS_QS)
    if is_shared(time_series):
        log_message(dated("Shared time-series link. %s\n" %
                          get_url(time_series, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, time_series)
    return time_series
def set_library_args(args, name=None):
    """Return a library creation arguments dict.

    `name` defaults to `args.name`; project and imports are added
    when present in `args`.
    """
    if name is None:
        name = args.name
    library_args = set_basic_args(args, name)
    if args.project_id is not None:
        library_args["project"] = args.project_id
    if args.imports is not None:
        library_args["imports"] = args.imports_
    update_attributes(library_args, args.json_args.get('library'))
    return library_args
def create_library(source_code, library_args, args, api=None, path=None,
                   session_file=None, log=None):
    """Creates remote library.

    Compiles `source_code` remotely, logs the created id and waits
    until the library is compiled, exiting on failure.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating library \"%s\".\n" % library_args["name"])
    log_message(message, log_file=session_file, console=args.verbosity)
    library = api.create_library(source_code, library_args)
    log_created_resources("library", path,
                          bigml.api.get_library_id(library), mode='a')
    library_id = check_resource_error(library, "Failed to create library: ")
    try:
        library = check_resource(library, api.get_library)
    # fix: Python 2-only "except ValueError, exception" -> "as" form
    except ValueError as exception:
        sys.exit("Failed to get a compiled library: %s" % str(exception))
    message = dated("Library created: %s\n" % get_url(library))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % library_id, log_file=log)
    return library
def update_sample_parameters_args(resource_args, args):
    """Add the common sampling options from `args` to `resource_args`.

    Only non-default values are copied: a sample rate different from 1
    and any of the out_of_bag / replacement / randomize flags.
    """
    if args.sample_rate != 1:
        resource_args["sample_rate"] = args.sample_rate
    for flag in ("out_of_bag", "replacement", "randomize"):
        if getattr(args, flag, False):
            resource_args[flag] = True
    return resource_args
def set_topic_model_args(args, name=None, fields=None,
                         topic_model_fields=None):
    """Build the creation arguments dict for a topic model."""
    if name is None:
        name = args.name
    if topic_model_fields is None:
        topic_model_fields = args.topic_model_fields_
    seed = SEED if args.seed is None else args.seed

    topic_model_args = set_basic_args(args, name)
    topic_model_args.update({"seed": seed, "topicmodel_seed": seed})
    if topic_model_fields and fields is not None:
        topic_model_args.update(
            input_fields=configure_input_fields(fields, topic_model_fields))
    topic_model_args.update({"sample_rate": args.sample_rate,
                             "bigrams": args.bigrams,
                             "case_sensitive": args.case_sensitive})
    # optional numeric settings: only sent when explicitly given
    for key, value in [("number_of_topics", args.number_of_topics),
                       ("term_limit", args.term_limit),
                       ("top_n_terms", args.top_n_terms),
                       ("minimum_name_terms", args.minimum_name_terms)]:
        if value is not None:
            topic_model_args[key] = value
    if args.excluded_terms:
        topic_model_args["excluded_terms"] = args.excluded_terms_
    topic_model_args = update_sample_parameters_args(topic_model_args, args)
    if 'topic_model' in args.json_args:
        update_json_args(topic_model_args,
                         args.json_args.get('topic_model'),
                         fields)
    return topic_model_args
def create_topic_models(datasets, topic_model_ids, topic_model_args,
                        args, api=None, path=None,
                        session_file=None, log=None):
    """Create remote topic models.

    Resumes from the ids in `topic_model_ids`: only the remaining
    datasets are used (one topic model per command, at present).
    """
    if api is None:
        api = bigml.api.BigML()
    topic_models = topic_model_ids[:]
    existing_topic_models = len(topic_models)
    topic_model_args_list = []
    datasets = datasets[existing_topic_models:]
    # if resuming and all topic models were created, there will
    # be no datasets left
    if datasets:
        if isinstance(topic_model_args, list):
            topic_model_args_list = topic_model_args

        # Only one topic model per command, at present
        number_of_topic_models = 1
        message = dated("Creating %s.\n" %
                        plural("topic model", number_of_topic_models))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_topic_models):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_topic_models,
                                     api, "topicmodel")
            if topic_model_args_list:
                topic_model_args = topic_model_args_list[i]

            topic_model = api.create_topic_model(datasets,
                                                 topic_model_args,
                                                 retries=None)
            topic_model_id = check_resource_error( \
                topic_model,
                "Failed to create topic model: ")
            log_message("%s\n" % topic_model_id, log_file=log)
            topic_model_ids.append(topic_model_id)
            inprogress.append(topic_model_id)
            topic_models.append(topic_model)
            log_created_resources("topic_models", path, topic_model_id,
                                  mode='a')

        if args.verbosity:
            if bigml.api.get_status(topic_model)['code'] != bigml.api.FINISHED:
                try:
                    topic_model = check_resource( \
                        topic_model, api.get_topic_model,
                        query_string=query_string)
                # fix: Python 2-only "except ValueError, exception"
                except ValueError as exception:
                    sys.exit("Failed to get a finished topic model: %s" %
                             str(exception))
                topic_models[0] = topic_model
            message = dated("Topic model created: %s\n" %
                            get_url(topic_model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, topic_model)

    return topic_models, topic_model_ids
def get_topic_models(topic_model_ids,
                     args, api=None, session_file=None):
    """Retrieves remote topic model in its actual status.

    Only the first id is fetched (one topic model per command at
    present); the finished resource replaces it in the returned list.
    """
    if api is None:
        api = bigml.api.BigML()
    topic_models = topic_model_ids
    topic_model_id = topic_model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("topic model", len(topic_model_ids)),
                     get_url(topic_model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one topic_model at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        topic_model = check_resource(topic_model_ids[0],
                                     api.get_topic_model,
                                     query_string=query_string)
    # fix: Python 2-only "except ValueError, exception" -> "as" form
    except ValueError as exception:
        sys.exit("Failed to get a finished topic model: %s" % \
                 str(exception))
    topic_models[0] = topic_model
    return topic_models, topic_model_ids
def set_publish_topic_model_args(args):
    """Build the args dict used to publish a topic model.

    Returns an empty dict unless publishing was requested.
    """
    public_topic_model = {}
    if args.public_topic_model:
        public_topic_model.update(private=False)
        if args.model_price:
            public_topic_model.update(price=args.model_price)
        if args.cpp:
            public_topic_model.update(credits_per_prediction=args.cpp)
    return public_topic_model
def update_topic_model(topic_model, topic_model_args,
                       args, api=None, path=None, session_file=None):
    """Update a topic model's properties and wait for completion."""
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating topic model. %s\n" % get_url(topic_model)),
                log_file=session_file, console=args.verbosity)
    topic_model = api.update_topic_model(topic_model, topic_model_args)
    check_resource_error(topic_model,
                         "Failed to update topic model: %s"
                         % topic_model['resource'])
    topic_model = check_resource(topic_model, api.get_topic_model,
                                 query_string=FIELDS_QS)
    if is_shared(topic_model):
        log_message(dated("Shared topic model link. %s\n" %
                          get_url(topic_model, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, topic_model)
    return topic_model
def set_batch_topic_distribution_args( \
        args, fields=None, dataset_fields=None):
    """Return batch topic distribution args dict.

    Maps fields between the topic model and the test dataset and
    selects the output fields for the batch topic distribution.
    """
    batch_topic_distribution_args = set_basic_batch_args(args, args.name)

    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_topic_distribution_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})

    if args.prediction_info == FULL_FORMAT:
        batch_topic_distribution_args.update(all_fields=True)
    if args.prediction_fields:
        batch_topic_distribution_args.update(all_fields=False)
        prediction_fields = []
        for field in args.prediction_fields.split(args.args_separator):
            field = field.strip()
            # NOTE(review): dataset_fields may still be None here when no
            # fields map was given -- confirm callers always provide it
            if not field in dataset_fields.fields:
                try:
                    field = dataset_fields.field_id(field)
                # fix: Python 2-only "except ValueError, exc" syntax
                except ValueError as exc:
                    sys.exit(exc)
            prediction_fields.append(field)
        batch_topic_distribution_args.update(output_fields=prediction_fields)
    if 'batch_topic_distribution' in args.json_args:
        update_json_args(
            batch_topic_distribution_args, args.json_args.get( \
                'batch_topic_distribution'), fields)
    return batch_topic_distribution_args
def create_batch_topic_distribution(topic_model, test_dataset,
                                    batch_topic_distribution_args, args,
                                    api=None, session_file=None,
                                    path=None, log=None):
    """Creates remote batch topic distribution.

    The distribution is created from `topic_model` over `test_dataset`
    and waited for until finished, exiting on failure.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch topic distribution.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_topic_distribution = api.create_batch_topic_distribution( \
        topic_model, test_dataset, batch_topic_distribution_args, retries=None)
    log_created_resources( \
        "batch_topic_distribution", path,
        bigml.api.get_batch_topic_distribution_id(batch_topic_distribution),
        mode='a')
    batch_topic_distribution_id = check_resource_error(
        batch_topic_distribution,
        "Failed to create batch topic distribution: ")
    try:
        batch_topic_distribution = check_resource( \
            batch_topic_distribution, api.get_batch_topic_distribution)
    # fix: Python 2-only "except ValueError, exception" -> "as" form
    except ValueError as exception:
        sys.exit("Failed to get a finished batch topic distribution: %s"
                 % str(exception))
    message = dated("Batch topic distribution created: %s\n"
                    % get_url(batch_topic_distribution))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_topic_distribution_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_topic_distribution)
    return batch_topic_distribution
def set_forecast_args(args, fields=None):
    """Return the arguments dict used to create a forecast."""
    forecast_args = set_basic_args(args, args.name)
    forecast_args["intervals"] = args.intervals
    if 'forecast' in args.json_args:
        update_json_args(forecast_args,
                         args.json_args.get('forecast'),
                         fields)
    return forecast_args
def create_forecast(time_series, input_data, forecast_args, args,
                    api=None, session_file=None,
                    path=None, log=None):
    """Creates remote forecast.

    The forecast is created from `time_series` with `input_data` and
    waited for until finished, exiting on failure.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating remote forecast.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    forecast = api.create_forecast(time_series, input_data,
                                   forecast_args,
                                   retries=None)
    log_created_resources("forecast", path,
                          bigml.api.get_forecast_id(forecast),
                          mode='a')
    forecast_id = check_resource_error(
        forecast, "Failed to create forecast: ")
    try:
        forecast = check_resource(forecast, api.get_forecast)
    # fix: Python 2-only "except ValueError, exception" -> "as" form
    except ValueError as exception:
        sys.exit("Failed to get a finished forecast: %s"
                 % str(exception))
    message = dated("Forecast created: %s\n"
                    % get_url(forecast))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % forecast_id, log_file=log)
    if args.reports:
        report(args.reports, path, forecast)
    return forecast
def set_deepnet_args(args, name=None, fields=None,
                     objective_id=None,
                     deepnet_fields=None):
    """Return deepnet arguments dict.

    Values not passed explicitly default to the ones stored in `args`.
    """
    if name is None:
        name = args.name
    if deepnet_fields is None:
        deepnet_fields = args.deepnet_fields_
    if objective_id is None:
        objective_id = args.objective_id_
    deepnet_args = set_basic_model_args(args, name)
    deepnet_args.update({
        "seed": SEED if args.seed is None else args.seed
    })

    if objective_id is not None and fields is not None:
        deepnet_args.update({"objective_field": objective_id})
    if deepnet_fields and fields is not None:
        input_fields = configure_input_fields(fields, deepnet_fields)
        deepnet_args.update(input_fields=input_fields)
    if ((args.evaluate and args.test_split == 0 and args.test_datasets is None)
            or args.cross_validation_rate > 0):
        deepnet_args.update(seed=SEED)
        if args.cross_validation_rate > 0:
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
              and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE
    deepnet_args.update({"sample_rate": args.sample_rate})
    if args.batch_normalization is not None:
        deepnet_args.update({"batch_normalization": args.batch_normalization})
    if args.dropout_rate:
        deepnet_args.update({"dropout_rate": args.dropout_rate})
    if args.hidden_layers is not None:
        deepnet_args.update({"hidden_layers": args.hidden_layers_})
    if args.learn_residuals is not None:
        deepnet_args.update( \
            {"learn_residuals": args.learn_residuals})
    # bug fix: the original block checked `args.max_iterations` but
    # stored `learning_rate`, so `max_iterations` was never sent to the
    # API; each option now has its own guard and key
    if args.learning_rate is not None:
        deepnet_args.update(\
            {"learning_rate": args.learning_rate})
    if args.max_iterations is not None:
        deepnet_args.update(\
            {"max_iterations": args.max_iterations})
    if args.max_training_time is not None:
        deepnet_args.update(\
            {"max_training_time": args.max_training_time})
    if args.number_of_hidden_layers is not None:
        deepnet_args.update(\
            {"number_of_hidden_layers": args.number_of_hidden_layers})
    if args.number_of_model_candidates is not None:
        deepnet_args.update(\
            {"number_of_model_candidates": args.number_of_model_candidates})
    if args.search is not None:
        deepnet_args.update(\
            {"search": args.search})
    if args.suggest_structure is not None:
        deepnet_args.update(\
            {"suggest_structure": args.suggest_structure})
    if not args.missing_numerics:
        deepnet_args.update(\
            {"missing_numerics": args.missing_numerics})
    if args.tree_embedding:
        deepnet_args.update(\
            {"tree_embedding": args.tree_embedding})
    deepnet_args = update_sample_parameters_args( \
        deepnet_args, args)
    if 'deepnet' in args.json_args:
        update_json_args(deepnet_args,
                         args.json_args.get('deepnet'),
                         fields)
    return deepnet_args
def create_deepnets(datasets, deepnet_ids,
                    deepnet_args,
                    args, api=None, path=None,
                    session_file=None, log=None):
    """Create remote deepnets.

    Resumes from the ids in `deepnet_ids`: only the remaining datasets
    are used (one deepnet per command, at present).
    """
    if api is None:
        api = bigml.api.BigML()
    deepnets = deepnet_ids[:]
    existing_deepnets = len(deepnets)
    deepnet_args_list = []
    datasets = datasets[existing_deepnets:]
    # if resuming and all deepnets were created,
    # there will be no datasets left
    if datasets:
        if isinstance(deepnet_args, list):
            deepnet_args_list = deepnet_args

        # Only one deepnet per command, at present
        number_of_deepnets = 1
        message = dated("Creating %s.\n" %
                        plural("deepnet",
                               number_of_deepnets))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_deepnets):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_deepnets,
                                     api, "deepnet")
            if deepnet_args_list:
                deepnet_args = deepnet_args_list[i]
            if args.cross_validation_rate > 0:
                # each cross-validation model gets its own deterministic seed
                new_seed = get_basic_seed(i + existing_deepnets)
                deepnet_args.update(seed=new_seed)

            if (args.test_datasets and args.evaluate):
                dataset = datasets[i]
                deepnet = api.create_deepnet( \
                    dataset, deepnet_args, retries=None)
            elif args.dataset_off and args.evaluate:
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_deepnets]
                deepnet = api.create_deepnet( \
                    multi_dataset, deepnet_args, retries=None)
            else:
                deepnet = api.create_deepnet( \
                    datasets, deepnet_args, retries=None)
            deepnet_id = check_resource_error( \
                deepnet, "Failed to create deepnet: ")
            log_message("%s\n" % deepnet_id, log_file=log)
            deepnet_ids.append(deepnet_id)
            inprogress.append(deepnet_id)
            deepnets.append(deepnet)
            log_created_resources("deepnets",
                                  path,
                                  deepnet_id, mode='a')

        if args.verbosity:
            if bigml.api.get_status(deepnet)['code'] != \
                    bigml.api.FINISHED:
                try:
                    deepnet = check_resource( \
                        deepnet, api.get_deepnet,
                        query_string=query_string)
                # fix: Python 2-only "except ValueError, exception"
                except ValueError as exception:
                    sys.exit("Failed to get a finished deepnet:"
                             " %s" %
                             str(exception))
                deepnets[0] = deepnet
            message = dated("Deepnet created: %s\n" %
                            get_url(deepnet))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, deepnet)

    return deepnets, deepnet_ids
def get_deepnets(deepnet_ids, args, api=None, session_file=None):
    """Retrieves remote deepnet in its actual status.

    Only the first id is fetched (one deepnet per command at present);
    the finished resource replaces it in the returned list.
    """
    if api is None:
        api = bigml.api.BigML()
    deepnets = deepnet_ids
    deepnet_id = deepnet_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("deepnet", len(deepnet_ids)),
                     get_url(deepnet_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one deepnet to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        deepnet = check_resource(deepnet_ids[0],
                                 api.get_deepnet,
                                 query_string=query_string)
    # fix: Python 2-only "except ValueError, exception" -> "as" form
    except ValueError as exception:
        sys.exit("Failed to get a finished deepnet: %s" % \
                 str(exception))
    deepnets[0] = deepnet
    return deepnets, deepnet_ids
def update_deepnets(deepnet, deepnet_args,
                    args, api=None, path=None, session_file=None):
    """Update a deepnet's properties and wait for completion."""
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating deepnet. %s\n" % get_url(deepnet)),
                log_file=session_file, console=args.verbosity)
    deepnet = api.update_deepnet(deepnet, deepnet_args)
    check_resource_error(deepnet,
                         "Failed to update deepnet: %s"
                         % deepnet['resource'])
    deepnet = check_resource(deepnet, api.get_deepnet,
                             query_string=FIELDS_QS)
    if is_shared(deepnet):
        log_message(dated("Shared deepnet link. %s\n" %
                          get_url(deepnet, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, deepnet)
    return deepnet
def set_pca_args(args, name=None, fields=None,
                 pca_fields=None):
    """Build the creation arguments dict for a PCA."""
    if name is None:
        name = args.name
    if pca_fields is None:
        pca_fields = args.pca_fields_
    seed = SEED if args.seed is None else args.seed

    pca_args = set_basic_args(args, name)
    pca_args.update({"seed": seed, "pca_seed": seed})
    pca_args["sample_rate"] = args.sample_rate
    pca_args = update_sample_parameters_args(pca_args, args)
    if fields is not None:
        input_fields = fields.fields.keys()
        if pca_fields:
            input_fields = configure_input_fields(fields, pca_fields)
        if args.exclude_objective:
            # drop the fields explicitly excluded by the user
            input_fields = [field for field in input_fields
                            if field not in args.exclude_fields]
        pca_args.update(input_fields=input_fields)
    if 'pca' in args.json_args:
        update_json_args(pca_args,
                         args.json_args.get('pca'),
                         fields)
    return pca_args
def create_pca(datasets, pca, pca_args,
               args, api=None, path=None,
               session_file=None, log=None):
    """Create remote pcas.

    Only one PCA is created per command at present; the created (or
    resumed) PCA resource is returned.
    """
    if api is None:
        api = bigml.api.BigML()
    pcas = []
    pca_ids = []
    if pca is not None:
        # resuming: reuse the resource (or id) passed in
        # NOTE(review): the full resource is stored in pca_ids here,
        # not its id -- confirm whether callers expect the id instead
        pcas = [pca]
        pca_ids = [pca]
    existing_pcas = len(pcas)
    pca_args_list = []
    datasets = datasets[existing_pcas:]
    # if resuming and all pcas were created, there will
    # be no datasets left
    if datasets:
        if isinstance(pca_args, list):
            pca_args_list = pca_args

        # Only one pca per command, at present
        number_of_pcas = 1
        message = dated("Creating %s.\n" %
                        plural("pca", number_of_pcas))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_pcas):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_pcas,
                                     api, "pca")
            if pca_args_list:
                pca_args = pca_args_list[i]

            pca = api.create_pca(datasets,
                                 pca_args,
                                 retries=None)
            pca_id = check_resource_error( \
                pca,
                "Failed to create pca: ")
            log_message("%s\n" % pca_id, log_file=log)
            pca_ids.append(pca_id)
            inprogress.append(pca_id)
            pcas.append(pca)
            log_created_resources("pcas", path, pca_id,
                                  mode='a')

        if args.verbosity:
            if bigml.api.get_status(pca)['code'] != bigml.api.FINISHED:
                try:
                    pca = check_resource( \
                        pca, api.get_pca,
                        query_string=query_string)
                # fix: Python 2-only "except ValueError, exception"
                except ValueError as exception:
                    sys.exit("Failed to get a finished pca: %s" %
                             str(exception))
                pcas[0] = pca
            message = dated("PCA created: %s\n" %
                            get_url(pca))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, pca)

    return pca
def get_pca(pca,
            args, api=None, session_file=None):
    """Retrieves remote pca in its actual status.

    Returns the finished PCA resource, exiting on failure.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Retrieving PCA. %s\n" %
                    get_url(pca))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one PCA at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        pca = check_resource(pca,
                             api.get_pca,
                             query_string=query_string)
    # fix: Python 2-only "except ValueError, exception" -> "as" form
    except ValueError as exception:
        sys.exit("Failed to get a finished pca: %s" % \
                 str(exception))
    return pca
def set_publish_pca_args(args):
    """Build the args dict used to publish a pca.

    Returns an empty dict unless publishing was requested.
    """
    public_pca = {}
    if args.public_pca:
        public_pca.update(private=False)
        if args.model_price:
            public_pca.update(price=args.model_price)
        if args.cpp:
            public_pca.update(credits_per_prediction=args.cpp)
    return public_pca
def update_pca(pca, pca_args,
               args, api=None, path=None, session_file=None):
    """Update a PCA's properties and wait for completion."""
    if api is None:
        api = bigml.api.BigML()
    log_message(dated("Updating PCA. %s\n" % get_url(pca)),
                log_file=session_file, console=args.verbosity)
    pca = api.update_pca(pca, pca_args)
    check_resource_error(pca,
                         "Failed to update PCA: %s"
                         % pca['resource'])
    pca = check_resource(pca, api.get_pca,
                         query_string=FIELDS_QS)
    if is_shared(pca):
        log_message(dated("Shared PCA link. %s\n" %
                          get_url(pca, shared=True)),
                    log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, pca)
    return pca
def set_batch_projection_args( \
        args, fields=None, dataset_fields=None):
    """Return the creation arguments dict for a batch projection.

    Adds a fields map when one was given on the command line, restricts
    the output fields when ``--projection-fields`` is used, and finally
    merges any user-supplied JSON options. Exits the program if a
    requested projection field cannot be resolved.
    """
    batch_projection_args = set_basic_batch_args(args, args.name)
    if args.fields_map_ and fields is not None:
        if dataset_fields is None:
            dataset_fields = fields
        batch_projection_args.update({
            "fields_map": map_fields(args.fields_map_,
                                     fields, dataset_fields)})
    batch_projection_args.update(all_fields=False)
    if args.projection_fields:
        batch_projection_args.update(all_fields=True)
        projection_fields = []
        if args.projection_fields != "all":
            # NOTE(review): all_fields is already True at this point, so
            # this second update is redundant (or was meant to be False
            # when an explicit field list is given) -- confirm against
            # the batch projection API semantics.
            batch_projection_args.update(all_fields=True)
            for field in args.projection_fields.split(args.args_separator):
                field = field.strip()
                if field not in dataset_fields.fields:
                    # resolve field names to field ids
                    try:
                        field = dataset_fields.field_id(field)
                    except ValueError as exc:
                        sys.exit(exc)
                projection_fields.append(field)
            batch_projection_args.update(output_fields=projection_fields)
    if 'batch_projection' in args.json_args:
        update_json_args(
            batch_projection_args, args.json_args.get( \
                'batch_projection'), fields)
    return batch_projection_args
def create_batch_projection(pca, test_dataset,
                            batch_projection_args, args,
                            api=None, session_file=None,
                            path=None, log=None):
    """Creates a remote batch projection of ``test_dataset`` with ``pca``.

    Logs the created resource id, waits for the projection to finish and
    returns it. Exits the program if the resource never reaches a
    finished state.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch projection.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_projection = api.create_batch_projection( \
        pca, test_dataset, batch_projection_args, retries=None)
    log_created_resources( \
        "batch_projection", path,
        bigml.api.get_batch_projection_id(batch_projection), mode='a')
    batch_projection_id = check_resource_error(
        batch_projection,
        "Failed to create batch projection: ")
    try:
        batch_projection = check_resource( \
            batch_projection, api.get_batch_projection)
    except ValueError as exception:
        # "as" form works on both Python 2.6+ and Python 3
        sys.exit("Failed to get a finished batch projection: %s"
                 % str(exception))
    message = dated("Batch projection created: %s\n"
                    % get_url(batch_projection))
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % batch_projection_id, log_file=log)
    if args.reports:
        report(args.reports, path, batch_projection)
    return batch_projection
def set_fusion_args(args, name=None, fields=None):
    """Builds the arguments dict used to create a fusion.

    Falls back to ``args.name`` when no explicit ``name`` is given and
    merges any user-supplied JSON options stored under the "fusion" key.
    """
    fusion_name = args.name if name is None else name
    fusion_args = set_basic_args(args, fusion_name)
    if 'fusion' in args.json_args:
        update_json_args(
            fusion_args, args.json_args.get('fusion'), fields)
    return fusion_args
def create_fusion(models, fusion, fusion_args,
                  args, api=None, path=None,
                  session_file=None, log=None):
    """Creates a remote fusion from the given list of models.

    When ``fusion`` is not None the command is resuming and the existing
    resource is reused. Only one fusion is created per command at
    present. Exits the program if the fusion cannot be retrieved in a
    finished state.
    """
    if api is None:
        api = bigml.api.BigML()
    fusions = []
    fusion_ids = []
    if fusion is not None:
        # resuming: keep the already-created fusion
        fusions = [fusion]
        fusion_ids = [fusion]
    if models:
        # Only one fusion per command, at present
        number_of_fusions = 1
        message = dated("Creating %s.\n" %
                        plural("fusion", number_of_fusions))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for _ in range(number_of_fusions):
            # throttle creation to the configured level of parallelism
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_fusions,
                                     api, "fusion")
            fusion = api.create_fusion(models,
                                       fusion_args,
                                       retries=None)
            fusion_id = check_resource_error( \
                fusion,
                "Failed to create fusion: ")
            log_message("%s\n" % fusion_id, log_file=log)
            fusion_ids.append(fusion_id)
            inprogress.append(fusion_id)
            fusions.append(fusion)
            log_created_resources("fusions", path, fusion_id,
                                  mode='a')
        if args.verbosity:
            # in verbose mode wait until the fusion is finished before
            # reporting its URL
            if bigml.api.get_status(fusion)['code'] != bigml.api.FINISHED:
                try:
                    fusion = check_resource( \
                        fusion, api.get_fusion,
                        query_string=query_string)
                except ValueError as exception:
                    sys.exit("Failed to get a finished fusion: %s" %
                             str(exception))
                fusions[0] = fusion
            message = dated("Fusion created: %s\n" %
                            get_url(fusion))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, fusion)
    return fusion
def get_fusion(fusion,
               args, api=None, session_file=None):
    """Retrieves the remote fusion in its actual status.

    Exits the program if the fusion cannot be retrieved in a finished
    state.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Retrieving Fusion. %s\n" %
                    get_url(fusion))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one fusion at present
    try:
        # we need the whole fields structure when exporting fields
        fusion = check_resource(fusion,
                                api.get_fusion,
                                query_string=ALL_FIELDS_QS)
    except ValueError as exception:
        # "as" form works on both Python 2.6+ and Python 3
        sys.exit("Failed to get a finished fusion: %s" % \
                 str(exception))
    return fusion
def set_publish_fusion_args(args):
    """Builds the properties dict used to publish a fusion.

    The result only contains the publishing options that were actually
    supplied on the command line (and may therefore be empty).
    """
    publish_args = {"private": False} if args.public_fusion else {}
    if args.model_price:
        publish_args["price"] = args.model_price
    if args.cpp:
        publish_args["credits_per_prediction"] = args.cpp
    return publish_args
def update_fusion(fusion, fusion_args, args,
                  api=None, path=None, session_file=None):
    """Updates the remote fusion with the properties in ``fusion_args``.

    Logs progress to ``session_file``, re-fetches the resource after the
    update and returns the refreshed fusion structure. Exits the program
    if the update is rejected by the API.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating Fusion. %s\n" %
                    get_url(fusion))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    fusion = api.update_fusion(fusion, fusion_args)
    # abort with a message if the API reported an error for the update
    check_resource_error(fusion,
                         "Failed to update Fusion: %s"
                         % fusion['resource'])
    # re-fetch the updated resource; FIELDS_QS asks for the summarized
    # fields payload (ALL_FIELDS_QS is used elsewhere when exporting)
    fusion = check_resource(fusion,
                           api.get_fusion,
                           query_string=FIELDS_QS)
    if is_shared(fusion):
        message = dated("Shared Fusion link. %s\n" %
                        get_url(fusion, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
    if args.reports:
        report(args.reports, path, fusion)
    return fusion
| 0 | 0 | 0 |
ed2cca3bf12105002e31b5cf7ef2ec86dbfabae0 | 8,185 | py | Python | src/build/fuchsia/run_package.py | goochen/naiveproxy | 1d0682ee5bae6e648cd43c65f49b4eefd224f206 | [
"BSD-3-Clause"
] | 643 | 2021-08-02T05:04:20.000Z | 2022-03-27T22:56:02.000Z | src/build/fuchsia/run_package.py | goochen/naiveproxy | 1d0682ee5bae6e648cd43c65f49b4eefd224f206 | [
"BSD-3-Clause"
] | 18 | 2021-05-13T05:53:06.000Z | 2022-03-31T21:24:25.000Z | src/build/fuchsia/run_package.py | goochen/naiveproxy | 1d0682ee5bae6e648cd43c65f49b4eefd224f206 | [
"BSD-3-Clause"
] | 16 | 2021-08-31T07:08:45.000Z | 2022-02-14T12:36:15.000Z | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains a helper function for deploying and executing a packaged
executable on a Target."""
from __future__ import print_function
import common
import hashlib
import logging
import multiprocessing
import os
import re
import select
import subprocess
import sys
import threading
import uuid
from symbolizer import BuildIdsPaths, RunSymbolizer, SymbolizerFilter
FAR = common.GetHostToolPathFromPlatform('far')
# Amount of time to wait for the termination of the system log output thread.
_JOIN_TIMEOUT_SECS = 5
def _AttachKernelLogReader(target):
  """Attaches a kernel log reader as a long-running SSH task.

  Returns the piped process handle from RunCommandPiped; kernel log
  lines are available on its stdout (stderr is merged into stdout).
  """
  logging.info('Attaching kernel logger.')
  # stdin is /dev/null so the remote 'dlog -f' command never blocks
  # waiting for local input ('-f' presumably means follow mode -- confirm).
  return target.RunCommandPiped(['dlog', '-f'], stdin=open(os.devnull, 'r'),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
class SystemLogReader(object):
  """Collects and symbolizes Fuchsia system log to a file.

  Intended to be used as a context manager; __exit__ tears down the
  listener and symbolizer processes started by Start().
  """

  def __init__(self):
    # Populated by Start(); initialized to None so that __exit__ is safe
    # even when Start() was never called. (Restores the initializer that
    # was missing from this copy -- without it __exit__ raised
    # AttributeError and the class could not be used in a 'with' block.)
    self._listener_proc = None
    self._symbolizer_proc = None
    self._system_log = None

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    """Stops the system logging processes and closes the output file."""
    if self._symbolizer_proc:
      self._symbolizer_proc.kill()
    if self._listener_proc:
      self._listener_proc.kill()
    if self._system_log:
      self._system_log.close()

  def Start(self, target, package_paths, system_log_file):
    """Start a system log reader as a long-running SSH task."""
    logging.debug('Writing fuchsia system log to %s' % system_log_file)
    # 'log_listener' runs remotely; its merged stdout/stderr is piped
    # through the symbolizer into the line-buffered local log file.
    self._listener_proc = target.RunCommandPiped(['log_listener'],
                                                 stdout=subprocess.PIPE,
                                                 stderr=subprocess.STDOUT)
    self._system_log = open(system_log_file, 'w', buffering=1)
    self._symbolizer_proc = RunSymbolizer(self._listener_proc.stdout,
                                          self._system_log,
                                          BuildIdsPaths(package_paths))
class MergedInputStream(object):
  """Merges a number of input streams into a UNIX pipe on a dedicated thread.
  Terminates when the file descriptor of the primary stream (the first in
  the sequence) is closed."""

  def __init__(self, streams):
    # Restores the initializer missing from this copy: without it Start()
    # referenced the undefined self._Run and self._streams.
    # |streams| must be non-empty; streams[0] is the primary stream whose
    # EOF ends the merge.
    assert len(streams) > 0
    self._streams = streams
    self._output_stream = None
    self._thread = None

  def Start(self):
    """Returns a pipe to the merged output stream."""
    read_pipe, write_pipe = os.pipe()
    # Disable buffering for the stream to make sure there is no delay in logs.
    self._output_stream = os.fdopen(write_pipe, 'w', 0)
    self._thread = threading.Thread(target=self._Run)
    self._thread.start()
    return os.fdopen(read_pipe, 'r')

  def _Run(self):
    # Map fd -> stream so select() results can be resolved back to streams.
    streams_by_fd = {}
    primary_fd = self._streams[0].fileno()
    for s in self._streams:
      streams_by_fd[s.fileno()] = s
    # Set when the primary FD is closed. Input from other FDs will continue to
    # be processed until select() runs dry.
    flush = False
    # The lifetime of the MergedInputStream is bound to the lifetime of
    # |primary_fd|.
    while primary_fd:
      # When not flushing: block until data is read or an exception occurs.
      rlist, _, xlist = select.select(streams_by_fd, [], streams_by_fd)
      if len(rlist) == 0 and flush:
        break
      for fileno in xlist:
        # Exceptional condition: drop the stream, ending the loop when it
        # was the primary one.
        del streams_by_fd[fileno]
        if fileno == primary_fd:
          primary_fd = None
      for fileno in rlist:
        line = streams_by_fd[fileno].readline()
        if line:
          self._output_stream.write(line + '\n')
        else:
          # EOF on this stream.
          del streams_by_fd[fileno]
          if fileno == primary_fd:
            primary_fd = None
    # Flush the streams by executing nonblocking reads from the input file
    # descriptors until no more data is available, or all the streams are
    # closed.
    while streams_by_fd:
      rlist, _, _ = select.select(streams_by_fd, [], [], 0)
      if not rlist:
        break
      for fileno in rlist:
        line = streams_by_fd[fileno].readline()
        if line:
          self._output_stream.write(line + '\n')
        else:
          del streams_by_fd[fileno]
class RunPackageArgs:
  """RunPackage() configuration arguments structure.
  symbolizer_config: A newline delimited list of source files contained
  in the package. Omitting this parameter will disable symbolization.
  system_logging: If set, connects a system log reader to the target.
  """

  def __init__(self):
    # Restores the initializer and factory that were missing from this
    # copy, which had left a dangling @staticmethod decorator attached to
    # the module-level _DrainStreamToStdout helper below.
    self.symbolizer_config = None
    self.system_logging = False

  @staticmethod
  def FromCommonArgs(args):
    """Builds a RunPackageArgs from the shared command-line args object."""
    run_package_args = RunPackageArgs()
    run_package_args.system_logging = args.include_system_logs
    return run_package_args


def _DrainStreamToStdout(stream, quit_event):
  """Outputs the contents of |stream| until |quit_event| is set."""
  while not quit_event.is_set():
    # Poll with a short timeout so quit_event is re-checked regularly.
    rlist, _, _ = select.select([ stream ], [], [], 0.1)
    if rlist:
      line = rlist[0].readline()
      if not line:
        return
      print(line.rstrip())
def RunPackage(output_dir, target, package_paths, package_name,
               package_args, args):
  """Installs the Fuchsia package at |package_path| on the target,
  executes it with |package_args|, and symbolizes its output.
  output_dir: The path containing the build output files.
  target: The deployment Target object that will run the package.
  package_paths: The paths to the .far packages to be installed.
  package_name: The name of the primary package to run.
  package_args: The arguments which will be passed to the Fuchsia process.
  args: Structure of arguments to configure how the package will be run.
  Returns the exit code of the remote package process."""
  # Optionally capture the kernel log for the whole run.
  system_logger = (
      _AttachKernelLogReader(target) if args.system_logging else None)
  try:
    if system_logger:
      # Spin up a thread to asynchronously dump the system log to stdout
      # for easier diagnoses of early, pre-execution failures.
      log_output_quit_event = multiprocessing.Event()
      log_output_thread = threading.Thread(
          target=
          lambda: _DrainStreamToStdout(system_logger.stdout, log_output_quit_event)
      )
      log_output_thread.daemon = True
      log_output_thread.start()
    with target.GetAmberRepo():
      target.InstallPackage(package_paths)
      if system_logger:
        # Stop the temporary drain thread; from here on the kernel log is
        # merged with the process output via MergedInputStream below.
        log_output_quit_event.set()
        log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)
      logging.info('Running application.')
      command = ['run', _GetComponentUri(package_name)] + package_args
      process = target.RunCommandPiped(
          command,
          stdin=open(os.devnull, 'r'),
          stdout=subprocess.PIPE,
          stderr=subprocess.STDOUT)
      if system_logger:
        output_stream = MergedInputStream(
            [process.stdout, system_logger.stdout]).Start()
      else:
        output_stream = process.stdout
      # Run the log data through the symbolizer process.
      output_stream = SymbolizerFilter(output_stream,
                                       BuildIdsPaths(package_paths))
      for next_line in output_stream:
        print(next_line.rstrip())
      process.wait()
      if process.returncode == 0:
        logging.info('Process exited normally with status code 0.')
      else:
        # The test runner returns an error status code if *any* tests fail,
        # so we should proceed anyway.
        logging.warning(
            'Process exited with status code %d.' % process.returncode)
  finally:
    if system_logger:
      logging.info('Terminating kernel log reader.')
      log_output_quit_event.set()
      log_output_thread.join()
      system_logger.kill()
  # NOTE(review): if an exception is raised before |process| is created it
  # propagates out of the try block, so this return is only reached on the
  # successful path.
  return process.returncode
| 32.224409 | 83 | 0.668907 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains a helper function for deploying and executing a packaged
executable on a Target."""
from __future__ import print_function
import common
import hashlib
import logging
import multiprocessing
import os
import re
import select
import subprocess
import sys
import threading
import uuid
from symbolizer import BuildIdsPaths, RunSymbolizer, SymbolizerFilter
FAR = common.GetHostToolPathFromPlatform('far')
# Amount of time to wait for the termination of the system log output thread.
_JOIN_TIMEOUT_SECS = 5
def _AttachKernelLogReader(target):
  """Attaches a kernel log reader as a long-running SSH task.

  Returns the piped process handle from RunCommandPiped; kernel log
  lines are available on its stdout (stderr is merged into stdout).
  """
  logging.info('Attaching kernel logger.')
  # stdin is /dev/null so the remote 'dlog -f' command never blocks
  # waiting for local input ('-f' presumably means follow mode -- confirm).
  return target.RunCommandPiped(['dlog', '-f'], stdin=open(os.devnull, 'r'),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
class SystemLogReader(object):
  """Collects and symbolizes Fuchsia system log to a file."""
  def __init__(self):
    # Populated by Start(); kept as None so __exit__ is safe even when
    # Start() was never called.
    self._listener_proc = None
    self._symbolizer_proc = None
    self._system_log = None
  def __enter__(self):
    return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    """Stops the system logging processes and closes the output file."""
    if self._symbolizer_proc:
      self._symbolizer_proc.kill()
    if self._listener_proc:
      self._listener_proc.kill()
    if self._system_log:
      self._system_log.close()
  def Start(self, target, package_paths, system_log_file):
    """Start a system log reader as a long-running SSH task."""
    logging.debug('Writing fuchsia system log to %s' % system_log_file)
    # 'log_listener' runs remotely; its merged stdout/stderr is piped
    # through the symbolizer into the line-buffered local log file.
    self._listener_proc = target.RunCommandPiped(['log_listener'],
                                                 stdout=subprocess.PIPE,
                                                 stderr=subprocess.STDOUT)
    self._system_log = open(system_log_file,'w', buffering=1)
    self._symbolizer_proc = RunSymbolizer(self._listener_proc.stdout,
                                          self._system_log,
                                          BuildIdsPaths(package_paths))
class MergedInputStream(object):
  """Merges a number of input streams into a UNIX pipe on a dedicated thread.
  Terminates when the file descriptor of the primary stream (the first in
  the sequence) is closed."""
  def __init__(self, streams):
    # |streams| must be non-empty; streams[0] is the primary stream whose
    # EOF ends the merge.
    assert len(streams) > 0
    self._streams = streams
    self._output_stream = None
    self._thread = None
  def Start(self):
    """Returns a pipe to the merged output stream."""
    read_pipe, write_pipe = os.pipe()
    # Disable buffering for the stream to make sure there is no delay in logs.
    # NOTE(review): os.fdopen(fd, 'w', 0) is only valid on Python 2 --
    # Python 3 rejects unbuffered text mode. Confirm the supported runtime.
    self._output_stream = os.fdopen(write_pipe, 'w', 0)
    self._thread = threading.Thread(target=self._Run)
    self._thread.start();
    return os.fdopen(read_pipe, 'r')
  def _Run(self):
    # Map fd -> stream so select() results can be resolved back to streams.
    streams_by_fd = {}
    primary_fd = self._streams[0].fileno()
    for s in self._streams:
      streams_by_fd[s.fileno()] = s
    # Set when the primary FD is closed. Input from other FDs will continue to
    # be processed until select() runs dry.
    # NOTE(review): |flush| is never set to True, so the early-exit check
    # below looks like dead code -- confirm whether it was meant to be set
    # when the primary FD closes.
    flush = False
    # The lifetime of the MergedInputStream is bound to the lifetime of
    # |primary_fd|.
    while primary_fd:
      # When not flushing: block until data is read or an exception occurs.
      rlist, _, xlist = select.select(streams_by_fd, [], streams_by_fd)
      if len(rlist) == 0 and flush:
        break
      for fileno in xlist:
        # Exceptional condition on this fd: drop the stream, ending the
        # loop when it was the primary one.
        del streams_by_fd[fileno]
        if fileno == primary_fd:
          primary_fd = None
      for fileno in rlist:
        line = streams_by_fd[fileno].readline()
        if line:
          # NOTE(review): readline() appears to keep the trailing newline,
          # so appending '\n' may double-space the merged output; the
          # consumer rstrips each line -- confirm the intended framing.
          self._output_stream.write(line + '\n')
        else:
          # EOF on this stream.
          del streams_by_fd[fileno]
          if fileno == primary_fd:
            primary_fd = None
    # Flush the streams by executing nonblocking reads from the input file
    # descriptors until no more data is available, or all the streams are
    # closed.
    while streams_by_fd:
      rlist, _, _ = select.select(streams_by_fd, [], [], 0)
      if not rlist:
        break
      for fileno in rlist:
        line = streams_by_fd[fileno].readline()
        if line:
          self._output_stream.write(line + '\n')
        else:
          del streams_by_fd[fileno]
def _GetComponentUri(package_name):
  """Builds the fuchsia-pkg component URI for |package_name|."""
  component = package_name
  return 'fuchsia-pkg://fuchsia.com/%s#meta/%s.cmx' % (component, component)
class RunPackageArgs:
  """RunPackage() configuration arguments structure.
  symbolizer_config: A newline delimited list of source files contained
  in the package. Omitting this parameter will disable symbolization.
  system_logging: If set, connects a system log reader to the target.
  """
  def __init__(self):
    # Defaults: no symbolizer manifest, no system log capture.
    self.symbolizer_config = None
    self.system_logging = False
  @staticmethod
  def FromCommonArgs(args):
    """Builds a RunPackageArgs from the shared command-line args object."""
    parsed = RunPackageArgs()
    parsed.system_logging = args.include_system_logs
    return parsed
def _DrainStreamToStdout(stream, quit_event):
  """Echoes lines from |stream| to stdout until |quit_event| is set.

  Returns early when the stream reaches EOF (readline yields an empty
  string).
  """
  while not quit_event.is_set():
    # Poll with a short timeout so quit_event is re-checked regularly.
    readable, _, _ = select.select([stream], [], [], 0.1)
    if not readable:
      continue
    line = readable[0].readline()
    if not line:
      return
    print(line.rstrip())
def RunPackage(output_dir, target, package_paths, package_name,
               package_args, args):
  """Installs the Fuchsia package at |package_path| on the target,
  executes it with |package_args|, and symbolizes its output.
  output_dir: The path containing the build output files.
  target: The deployment Target object that will run the package.
  package_paths: The paths to the .far packages to be installed.
  package_name: The name of the primary package to run.
  package_args: The arguments which will be passed to the Fuchsia process.
  args: Structure of arguments to configure how the package will be run.
  Returns the exit code of the remote package process."""
  # Optionally capture the kernel log for the whole run.
  system_logger = (
      _AttachKernelLogReader(target) if args.system_logging else None)
  try:
    if system_logger:
      # Spin up a thread to asynchronously dump the system log to stdout
      # for easier diagnoses of early, pre-execution failures.
      log_output_quit_event = multiprocessing.Event()
      log_output_thread = threading.Thread(
          target=
          lambda: _DrainStreamToStdout(system_logger.stdout, log_output_quit_event)
      )
      log_output_thread.daemon = True
      log_output_thread.start()
    with target.GetAmberRepo():
      target.InstallPackage(package_paths)
      if system_logger:
        # Stop the temporary drain thread; from here on the kernel log is
        # merged with the process output via MergedInputStream below.
        log_output_quit_event.set()
        log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)
      logging.info('Running application.')
      command = ['run', _GetComponentUri(package_name)] + package_args
      process = target.RunCommandPiped(
          command,
          stdin=open(os.devnull, 'r'),
          stdout=subprocess.PIPE,
          stderr=subprocess.STDOUT)
      if system_logger:
        output_stream = MergedInputStream(
            [process.stdout, system_logger.stdout]).Start()
      else:
        output_stream = process.stdout
      # Run the log data through the symbolizer process.
      output_stream = SymbolizerFilter(output_stream,
                                       BuildIdsPaths(package_paths))
      for next_line in output_stream:
        print(next_line.rstrip())
      process.wait()
      if process.returncode == 0:
        logging.info('Process exited normally with status code 0.')
      else:
        # The test runner returns an error status code if *any* tests fail,
        # so we should proceed anyway.
        logging.warning(
            'Process exited with status code %d.' % process.returncode)
  finally:
    if system_logger:
      logging.info('Terminating kernel log reader.')
      log_output_quit_event.set()
      log_output_thread.join()
      system_logger.kill()
  # NOTE(review): if an exception is raised before |process| is created it
  # propagates out of the try block, so this return is only reached on the
  # successful path.
  return process.returncode
| 2,013 | 0 | 171 |
6a892f8bf2a1e819f349e187a7a5c6c0f795e772 | 728 | py | Python | _scripts/_hacked_labextension.py | deathbeds/wxyz | 9d46375ffa1b5dfe8ed8231d20f7951c0e6172bc | [
"BSD-3-Clause"
] | 23 | 2019-02-25T18:55:17.000Z | 2021-11-14T21:56:40.000Z | _scripts/_hacked_labextension.py | deathbeds/wxyz | 9d46375ffa1b5dfe8ed8231d20f7951c0e6172bc | [
"BSD-3-Clause"
] | 33 | 2019-02-23T08:37:41.000Z | 2022-03-18T23:17:02.000Z | _scripts/_hacked_labextension.py | deathbeds/wxyz | 9d46375ffa1b5dfe8ed8231d20f7951c0e6172bc | [
"BSD-3-Clause"
] | 3 | 2020-03-20T20:32:24.000Z | 2020-09-17T17:56:30.000Z | """ handle PEP 420 implicit namespace packages for labextensions
"""
# pylint: disable=protected-access
import importlib
import sys
from pathlib import Path
from jupyterlab import federated_labextensions
from jupyterlab.labextensions import LabExtensionApp
HERE = Path(__file__).parent
ROOT = HERE.parent
NODE_MODULES = ROOT / "node_modules"
BUILDER = NODE_MODULES / "@jupyterlab" / "builder" / "lib" / "build-labextension.js"
federated_labextensions._get_labextension_metadata = _get_labextension_metadata
main = LabExtensionApp.launch_instance
if __name__ == "__main__":
sys.exit(main())
| 26 | 84 | 0.791209 | """ handle PEP 420 implicit namespace packages for labextensions
"""
# pylint: disable=protected-access
import importlib
import sys
from pathlib import Path
from jupyterlab import federated_labextensions
from jupyterlab.labextensions import LabExtensionApp
HERE = Path(__file__).parent
ROOT = HERE.parent
NODE_MODULES = ROOT / "node_modules"
BUILDER = NODE_MODULES / "@jupyterlab" / "builder" / "lib" / "build-labextension.js"
def _get_labextension_metadata(module):
    """Import ``module`` and return ``(module, labextension_paths)``.

    Uses plain ``importlib`` plus the extension's
    ``_jupyter_labextension_paths`` hook so PEP 420 implicit namespace
    packages (see module docstring) resolve correctly.
    """
    m = importlib.import_module(module)
    return m, m._jupyter_labextension_paths()
# Monkey-patch jupyterlab so labextension commands use the loader above.
federated_labextensions._get_labextension_metadata = _get_labextension_metadata
main = LabExtensionApp.launch_instance
if __name__ == "__main__":
    sys.exit(main())
58feca2644f73943efdfc0fb4b46b584c12e0cb8 | 1,684 | py | Python | Sender/capture.py | shashanksangu/AV-Encryption-Using-Cellular-Automata | 4433b08bf491f3f9a63f36b77e46c0540ece8c1b | [
"MIT"
] | null | null | null | Sender/capture.py | shashanksangu/AV-Encryption-Using-Cellular-Automata | 4433b08bf491f3f9a63f36b77e46c0540ece8c1b | [
"MIT"
] | null | null | null | Sender/capture.py | shashanksangu/AV-Encryption-Using-Cellular-Automata | 4433b08bf491f3f9a63f36b77e46c0540ece8c1b | [
"MIT"
] | null | null | null | # For faster and efficient mathematical evaluations we use numpy
import numpy as np
# For image proccesing we use openCV
import cv2
# For creating and manipulating files
import os
# the time package provides various time-related functions
import time
# Create a video capture object to capture a video using device(camera) or from an existing video
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc('M','P','E','G')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
# Create a directory for storing the frames
try:
if not os.path.exists('data'):
os.makedirs('data')
except OSError:
print ('Error: Creating directory of data')
# Sometimes, cap(video capture object) may not have initialized the capture. In that case cap.read() raises an error.
# So we first check if the capture has been initialized by using the isOpened() method and use the open() method to initilaise the capture(if necessary...)
if not cap.isOpened() :
cap.open()
# log the start time in seconds using the time() function
start = time.time()
currentFrame = 0
while(time.time()-start < 2):
ret, frame = cap.read()
if ret==True:
out.write(frame)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Saves image of the current frame in jpg file
name = './data/frame'+str(currentFrame) +'.png'
print ('Creating...' + name)
cv2.imwrite(name, frame)
# To stop duplicate images
currentFrame += 1
else:
break
# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
| 29.034483 | 155 | 0.686461 | # For faster and efficient mathematical evaluations we use numpy
import numpy as np
# For image processing we use openCV
import cv2
# For creating and manipulating files
import os
# the time package provides various time-related functions
import time

# Create a video capture object to capture a video using device(camera) or from an existing video
cap = cv2.VideoCapture(0)

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc('M','P','E','G')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))

# Create a directory for storing the frames
try:
    if not os.path.exists('data'):
        os.makedirs('data')
except OSError:
    print ('Error: Creating directory of data')

# The capture object may not have initialized the capture, in which case
# cap.read() raises an error; check with isOpened() and reopen if needed.
if not cap.isOpened() :
    # BUG FIX: VideoCapture.open() requires a device index (or filename);
    # reopen the same device the capture was created with above.
    cap.open(0)

# log the start time in seconds using the time() function
start = time.time()
currentFrame = 0
# Capture frames for roughly two seconds.
while(time.time()-start < 2):
    ret, frame = cap.read()
    if ret==True:
        out.write(frame)
        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # Saves an image of the current frame as a PNG file
        name = './data/frame'+str(currentFrame) +'.png'
        print ('Creating...' + name)
        cv2.imwrite(name, frame)
        # To stop duplicate images
        currentFrame += 1
    else:
        break

# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
| 0 | 0 | 0 |
ce62cf755c6bddb1eed51d9760feab185da1392c | 1,723 | py | Python | make_heatmaps.py | aas-integration/integration-test2 | dc9a9b4593cd59841f0d8348056cbff80a9c2a21 | [
"MIT"
] | 3 | 2016-10-10T20:18:51.000Z | 2018-05-01T19:42:10.000Z | make_heatmaps.py | aas-integration/integration-test2 | dc9a9b4593cd59841f0d8348056cbff80a9c2a21 | [
"MIT"
] | 38 | 2016-08-22T03:20:25.000Z | 2018-06-11T19:13:05.000Z | make_heatmaps.py | aas-integration/integration-test2 | dc9a9b4593cd59841f0d8348056cbff80a9c2a21 | [
"MIT"
] | 7 | 2016-08-29T17:37:42.000Z | 2022-01-28T00:30:10.000Z | import sys, os, shutil
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
if __name__ == '__main__':
main(sys.argv[1])
| 25.338235 | 80 | 0.596634 | import sys, os, shutil
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
def main(file_name):
print("Opening {}".format(file_name))
w,h = 10, 10
matrix = [[0 for x in range(w)] for y in range(h)]
with open(file_name, 'r') as f:
content = f.readlines()
if len(content)!=2:
print ("Bad file format")
return
semantic_sim = [i.strip() for i in content[0].split(',')]
syntactic_sim = [i.strip() for i in content[1].split(',')]
print("Number of points: {}".format(len(semantic_sim)))
if len(semantic_sim)!=len(syntactic_sim):
print ("Bad file format")
return
max_c = 0
for i in range(len(semantic_sim)):
x = int(float(semantic_sim[i])*float(w-1))
y = int(float(syntactic_sim[i])*float(h-1))
matrix[x][y] += 1
max_c = max(max_c, matrix[x][y])
# sample to a matrix of size 100x100
imgdata = [[0 for x in range(100)] for y in range(100)]
for x in range(100):
for y in range(100):
imgdata[x][y] = matrix[int(float(x)/100.0 * w)][int(float(y)/100.0 * h)]
plt.imshow(imgdata, cmap='hot', interpolation='nearest')
plt.colorbar()
plt.gca().invert_yaxis()
plt.ylabel('name similarity')
plt.xlabel('semantic similarity')
plt.savefig(file_name.replace('.txt', '.pdf'))
#plt.show()
# img = Image.new( 'RGB', (w,h), "white") # create a new black image
# pixels = img.load() # create the pixel map
# for x in range(w):
# for y in range(h):
# c = 255 - int(float(matrix[x][y])*recolor_factor)
# pixels[x,y] = (c,c,c)
# img.show()
if __name__ == '__main__':
main(sys.argv[1])
| 1,516 | 0 | 23 |
49b941ace7988d588a69482a493fa7beee3c31d8 | 2,978 | py | Python | HLTriggerOffline/Common/test/hltSourceHarvestCompare_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | HLTriggerOffline/Common/test/hltSourceHarvestCompare_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | HLTriggerOffline/Common/test/hltSourceHarvestCompare_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("HLTCOMPARE")
process.load("HLTriggerOffline.Common.HLTValidation_cff")
process.load("HLTriggerOffline.Common.HLTValidationHarvest_cff")
process.load("HLTriggerOffline.Common.HLTValidationQT_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("Configuration.StandardSequences.L1TriggerDefaultMenu_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load("DQMServices.Components.EDMtoMEConverter_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("")
)
#process.dqmSaver.convention = 'RelVal'
process.dqmSaver.convention = 'Offline'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd = cms.untracked.bool(True)
process.dqmSaver.forceRunNumber = cms.untracked.int32(1)
process.dqmSaver.workflow = "/CMSSW_3_1_0/RelVal/TrigVal"
#process.dqmSaver.referenceHandling = cms.untracked.string('skip')
process.dqmSaver.referenceHandling = cms.untracked.string('all')
process.DQMStore.verbose=0
#"/build/nuno/test/CMSSW_3_1_X_2009-02-05-0000/src/HltReference.root"
process.options = cms.untracked.PSet(
fileMode = cms.untracked.string('FULLMERGE')
)
process.MessageLogger.categories.append('DQMFileSaver')
process.MessageLogger.cout.DQMFileSaver = cms.untracked.PSet(
limit = cms.untracked.int32(1000000)
)
process.MessageLogger.cerr.DQMFileSaver = cms.untracked.PSet(
limit = cms.untracked.int32(1000000)
)
process.source.fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_3_1_0_pre4/RelValTTbar/GEN-SIM-RECO/STARTUP_30X_v1/0001/A42D4BC9-8C16-DE11-8767-003048678B00.root'
)
process.validation = cms.Path(
process.hltvalidation
# process.HLTMuonVal
# process.muonTriggerRateTimeAnalyzer
#+process.HLTTauVal
#+process.egammaValidationSequence
#+process.HLTSusyExoVal
#+process.heavyFlavorValidationSequence
#+process.HLTJetMETValSeq
#+process.HLTAlCaVal
)
process.post_validation = cms.Path(
process.hltpostvalidation
# process.HLTMuonPostVal
#+process.HLTTauPostVal
#+process.EgammaPostVal
#+process.SusyExoPostVal
#+process.heavyFlavorValidationHarvestingSequence
#+process.JetMETPostVal
#+process.HLTAlCaPostVal
)
process.qt_validation = cms.Path(
process.hltvalidationqt
)
process.edmtome = cms.Path(process.EDMtoMEConverter)
process.saver = cms.Path(process.dqmSaver)
process.schedule = cms.Schedule(
process.validation,
process.edmtome,
process.post_validation,
process.qt_validation,
process.saver
)
for filter in (getattr(process,f) for f in process.filters_()):
if hasattr(filter,"outputFile"):
filter.outputFile=""
| 31.680851 | 119 | 0.775353 | import FWCore.ParameterSet.Config as cms
process = cms.Process("HLTCOMPARE")
process.load("HLTriggerOffline.Common.HLTValidation_cff")
process.load("HLTriggerOffline.Common.HLTValidationHarvest_cff")
process.load("HLTriggerOffline.Common.HLTValidationQT_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("Configuration.StandardSequences.L1TriggerDefaultMenu_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load("DQMServices.Components.EDMtoMEConverter_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("")
)
#process.dqmSaver.convention = 'RelVal'
process.dqmSaver.convention = 'Offline'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd = cms.untracked.bool(True)
process.dqmSaver.forceRunNumber = cms.untracked.int32(1)
process.dqmSaver.workflow = "/CMSSW_3_1_0/RelVal/TrigVal"
#process.dqmSaver.referenceHandling = cms.untracked.string('skip')
process.dqmSaver.referenceHandling = cms.untracked.string('all')
process.DQMStore.verbose=0
#"/build/nuno/test/CMSSW_3_1_X_2009-02-05-0000/src/HltReference.root"
process.options = cms.untracked.PSet(
fileMode = cms.untracked.string('FULLMERGE')
)
process.MessageLogger.categories.append('DQMFileSaver')
process.MessageLogger.cout.DQMFileSaver = cms.untracked.PSet(
limit = cms.untracked.int32(1000000)
)
process.MessageLogger.cerr.DQMFileSaver = cms.untracked.PSet(
limit = cms.untracked.int32(1000000)
)
process.source.fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_3_1_0_pre4/RelValTTbar/GEN-SIM-RECO/STARTUP_30X_v1/0001/A42D4BC9-8C16-DE11-8767-003048678B00.root'
)
process.validation = cms.Path(
process.hltvalidation
# process.HLTMuonVal
# process.muonTriggerRateTimeAnalyzer
#+process.HLTTauVal
#+process.egammaValidationSequence
#+process.HLTSusyExoVal
#+process.heavyFlavorValidationSequence
#+process.HLTJetMETValSeq
#+process.HLTAlCaVal
)
process.post_validation = cms.Path(
process.hltpostvalidation
# process.HLTMuonPostVal
#+process.HLTTauPostVal
#+process.EgammaPostVal
#+process.SusyExoPostVal
#+process.heavyFlavorValidationHarvestingSequence
#+process.JetMETPostVal
#+process.HLTAlCaPostVal
)
process.qt_validation = cms.Path(
process.hltvalidationqt
)
process.edmtome = cms.Path(process.EDMtoMEConverter)
process.saver = cms.Path(process.dqmSaver)
process.schedule = cms.Schedule(
process.validation,
process.edmtome,
process.post_validation,
process.qt_validation,
process.saver
)
for filter in (getattr(process,f) for f in process.filters_()):
if hasattr(filter,"outputFile"):
filter.outputFile=""
| 0 | 0 | 0 |
627ad4d425398892b9e9e46ff6f0a4cbfad92daf | 8,892 | py | Python | app/user/tests/test_user_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | tahmadvand/recipe_app_api | 40b4cc6960d7dc4858373b5f6ccca980ed0eeac8 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
# so we can generate our API URL.
# rest framework test helper tools:
from rest_framework.test import APIClient
# test client that we can use to make requests to our API and
# then check what the response is.
from rest_framework import status
# a module that contains some status codes that we can see in
# basically human readable form so instead of just typing 200 it's
# HTTP 200 ok it just makes the tests a little bit easier to
# read and understand.
# add a helper function or a
# constant variable for our URL that we're going to be testing
# so we're be testing the create user URL:
CREATE_USER_URL = reverse('user:create')
# create the user
# create URL and assign it to this create user URL variable.
TOKEN_URL = reverse('user:token')
# this is going to be the URL that we're going to use to make
# the HTTP POST request to generate our token.
ME_URL = reverse('user:me')
# account of the user who is authenticated
def create_user(**params):
# **: dynamic list of arguments.
# we can basically add as many arguments as we want
"""Helper function to create new user that you're testing with"""
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)(unauthenticated)"""
# this just makes it a little easier
# to call our client in our test so every single test
# we run we don't need to manually create this API client
# we just have one client for our test suite that we can
# reuse for all of the tests.
def test_create_valid_user_success(self):
"""Test creating using with a valid payload is successful"""
# payload is the object that you pass to
# the API when you make the request
payload = {
'email': 'test@londonappdev.com',
'password': 'testpass',
'name': 'name',
}
res = self.client.post(CREATE_USER_URL, payload)
# make request
# do a HTTP POST request to our client
# to our URL for creating users
# test that the outcome is what we expect: http 201 is created
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
# test that the object is actually created
user = get_user_model().objects.get(**res.data)
# here is we can unwind the response for this because when
# we do a HTTP POST and create a user we expect to see the
# created user object returned in the API along with this
# HTTP_201_created So if we do **res.data then it will take
# the dictionary response which should look very similar
# to this but it should have an added ID field We take the
# res.data and we just pass it in as the parameters for the
# get then if this gets the user successfully then we know
# that the user is actually being created properly.
self.assertTrue(
user.check_password(payload['password'])
# test our password is correct
)
self.assertNotIn('password', res.data)
# we don't want the password
# being returned in the request because it is a potential
# security vulnerability.
# password shouldn't be returned when we return our user
def test_user_exists(self):
"""Test creating a user that already exists failure"""
payload = {'email': 'test@londonappdev.com',
'password': 'testpass', 'name': 'Test'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
# make sure it's a bad request because the user already exists
def test_password_too_short(self):
"""Test that password must be more than 5 characters"""
payload = {'email': 'test@londonappdev.com',
'password': 'pw', 'name': 'Test'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
# if the user exists it will return true otherwise it will return false
self.assertFalse(user_exists)
# every single test that runs it refreshes the database
# so these users that were created in this test are not going to
# be accessible in this test so each test it basically starts anew
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
# response
# make a request to post payload to our token_url
self.assertIn('token', res.data)
# checks that there is a key called token in the
# response.data that we get back.
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='test@londonappdev.com', password='testpass')
payload = {'email': 'test@londonappdev.com', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
# because the password is wrong
def test_create_token_no_user(self):
"""Test that token is not created if user doens't exist"""
payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
# because each test it resets the database
# from scratch we don't need to worry about the fact that we created
# the user in this test because this test is going to run isolated
# from this test and the user won't exist by the time we start this test.
# test that authentication is required for the endpoint.
# make sure that after any changes that you make those api's will
# always be private
def test_retrieve_user_unauthorized(self):
"""Test that authentication required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
# retrieve profile successful.
# private means that authentication is required before
# you can use these endpoints.
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
# we don't need to basically set the authentication every single test we're
# just doing the setup and then that happens automatically before each test.
# test that we can retrieve the profile of the logged in user.
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email,
})
# test that you cannot do a HTTP POST request on the profile.
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me URL"""
res = self.client.post(ME_URL, {})
# we'll just post the empty object here to test it.
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
# our user profile update test.
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'newpassword123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 40.976959 | 77 | 0.681062 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
# so we can generate our API URL.
# rest framework test helper tools:
from rest_framework.test import APIClient
# test client that we can use to make requests to our API and
# then check what the response is.
from rest_framework import status
# a module that contains some status codes that we can see in
# basically human readable form so instead of just typing 200 it's
# HTTP 200 ok it just makes the tests a little bit easier to
# read and understand.
# add a helper function or a
# constant variable for our URL that we're going to be testing
# so we're be testing the create user URL:
CREATE_USER_URL = reverse('user:create')
# create the user
# create URL and assign it to this create user URL variable.
TOKEN_URL = reverse('user:token')
# this is going to be the URL that we're going to use to make
# the HTTP POST request to generate our token.
ME_URL = reverse('user:me')
# account of the user who is authenticated
def create_user(**params):
    """Create and return a user on the active user model.

    All keyword arguments are forwarded unchanged to the model
    manager's ``create_user``.
    """
    user_model = get_user_model()
    return user_model.objects.create_user(**params)
class PublicUserApiTests(TestCase):
    """Tests for the users API endpoints that need no authentication."""

    def setUp(self):
        # One shared unauthenticated client for every test in this class.
        self.client = APIClient()

    def test_create_valid_user_success(self):
        """Posting a valid payload creates the user and hides the password."""
        data = {
            'email': 'test@londonappdev.com',
            'password': 'testpass',
            'name': 'name',
        }

        response = self.client.post(CREATE_USER_URL, data)

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # The response body identifies the freshly created user.
        created = get_user_model().objects.get(**response.data)
        self.assertTrue(created.check_password(data['password']))
        # The password (even hashed) must never appear in the response body.
        self.assertNotIn('password', response.data)

    def test_user_exists(self):
        """Creating a user that already exists fails with 400."""
        data = {'email': 'test@londonappdev.com',
                'password': 'testpass', 'name': 'Test'}
        create_user(**data)

        response = self.client.post(CREATE_USER_URL, data)

        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        """A too-short password is rejected and no user is created."""
        data = {'email': 'test@londonappdev.com',
                'password': 'pw', 'name': 'Test'}

        response = self.client.post(CREATE_USER_URL, data)

        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        user_exists = get_user_model().objects.filter(
            email=data['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token_for_user(self):
        """A token is issued for valid credentials."""
        data = {'email': 'test@londonappdev.com', 'password': 'testpass'}
        create_user(**data)

        response = self.client.post(TOKEN_URL, data)

        self.assertIn('token', response.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_create_token_invalid_credentials(self):
        """No token is issued when the password is wrong."""
        create_user(email='test@londonappdev.com', password='testpass')
        data = {'email': 'test@londonappdev.com', 'password': 'wrong'}

        response = self.client.post(TOKEN_URL, data)

        self.assertNotIn('token', response.data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_no_user(self):
        """No token is issued for a user that does not exist."""
        data = {'email': 'test@londonappdev.com', 'password': 'testpass'}

        response = self.client.post(TOKEN_URL, data)

        self.assertNotIn('token', response.data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_missing_field(self):
        """Both email and password are required to obtain a token."""
        response = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})

        self.assertNotIn('token', response.data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_user_unauthorized(self):
        """The me endpoint rejects unauthenticated requests."""
        response = self.client.get(ME_URL)

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
# retrieve profile successful.
# private means that authentication is required before
# you can use these endpoints.
class PrivateUserApiTests(TestCase):
    """Tests for the users API endpoints that require authentication."""

    def setUp(self):
        # Create one user and force-authenticate the client as that user,
        # so every test starts from a logged-in session without going
        # through the real credential flow.
        self.user = create_user(
            email='test@londonappdev.com',
            password='testpass',
            name='fname',
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_retrieve_profile_success(self):
        """The me endpoint returns the logged-in user's profile."""
        response = self.client.get(ME_URL)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, {
            'name': self.user.name,
            'email': self.user.email,
        })

    def test_post_me_not_allowed(self):
        """POST is rejected on the me endpoint."""
        response = self.client.post(ME_URL, {})

        self.assertEqual(response.status_code,
                         status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_update_user_profile(self):
        """PATCH updates the authenticated user's name and password."""
        data = {'name': 'new name', 'password': 'newpassword123'}

        response = self.client.patch(ME_URL, data)

        self.user.refresh_from_db()
        self.assertEqual(self.user.name, data['name'])
        self.assertTrue(self.user.check_password(data['password']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| 415 | 0 | 53 |
2b24205d366a77d862af2dc0a0f3fb4826fa970e | 860 | py | Python | xrypto/markets/_okcoin.py | AlphaPerfect/Xrypto | fcb7b3b79c1f5ec0393d1795bcfa6556fd3e0dd9 | [
"Unlicense"
] | 10 | 2018-03-24T23:14:13.000Z | 2020-04-22T15:56:05.000Z | xrypto/markets/_okcoin.py | a04512/crypto-raven | e8ebba319f8f04314b4c21d6bc4c8901bf358281 | [
"Unlicense"
] | null | null | null | xrypto/markets/_okcoin.py | a04512/crypto-raven | e8ebba319f8f04314b4c21d6bc4c8901bf358281 | [
"Unlicense"
] | 10 | 2020-01-29T05:43:33.000Z | 2021-12-13T07:02:39.000Z | # Copyright (C) 2017, Philsong <songbohr@gmail.com>
import json
import config
from .market import Market
from exchanges.okcoin.OkcoinSpotAPI import OKCoinSpot | 31.851852 | 76 | 0.680233 | # Copyright (C) 2017, Philsong <songbohr@gmail.com>
import json
import config
from .market import Market
from exchanges.okcoin.OkcoinSpotAPI import OKCoinSpot
class OKCoin(Market):
    """Spot-market adapter for the OKCoin exchange.

    Resolves the (base, market) currencies for the given ``pair_code``,
    initialises the shared ``Market`` machinery with a 0.2% fee, and
    subscribes to depth updates published under the ``okcoin_depth`` event.
    """

    def __init__(self, pair_code):
        base_currency, market_currency = self.get_tradeable_pairs(pair_code)
        super().__init__(base_currency, market_currency, pair_code, 0.002)
        # Public (unauthenticated) client is sufficient for depth data.
        self.client = OKCoinSpot(None, None)
        self.event = 'okcoin_depth'
        self.subscribe_depth()

    def update_depth(self):
        """Fetch the raw order book and store it in normalised form."""
        raw_depth = self.client.depth(symbol=self.pair_code)
        self.depth = self.format_depth(raw_depth)

    def get_tradeable_pairs(self, pair_code):
        """Map a pair code to its (base_currency, market_currency) tuple.

        Raises:
            ValueError: if ``pair_code`` is not supported. (Previously this
                was ``assert(False)``, which is stripped under ``python -O``
                and would then crash with UnboundLocalError instead.)
        """
        if pair_code == 'btc_cny':
            base_currency = 'CNY'
            market_currency = 'BTC'
        else:
            raise ValueError('Unsupported pair_code: %s' % (pair_code,))
        return base_currency, market_currency
16054c2df1e0f034497918caea82be5280e888cd | 4,839 | py | Python | qcfractal/storage_sockets/view.py | MolSSI/dqm_server | ceff64fe032590095e0f865bc1d0c2da4684404e | [
"BSD-3-Clause"
] | 113 | 2018-08-04T20:33:41.000Z | 2022-02-08T21:17:52.000Z | qcfractal/storage_sockets/view.py | doaa-altarawy/QCFractal | 5f00dd06bb34ca912c4055f0cbac60863ea89c7f | [
"BSD-3-Clause"
] | 665 | 2018-08-04T14:16:53.000Z | 2022-03-25T15:37:41.000Z | qcfractal/storage_sockets/view.py | doaa-altarawy/QCFractal | 5f00dd06bb34ca912c4055f0cbac60863ea89c7f | [
"BSD-3-Clause"
] | 40 | 2018-08-16T21:41:02.000Z | 2022-01-26T15:07:06.000Z | import io
import pathlib
from typing import Any, Dict, Union
import numpy as np
import pandas as pd
from qcelemental.util.serialization import serialize
from ..interface.collections import HDF5View
| 32.695946 | 104 | 0.563959 | import io
import pathlib
from typing import Any, Dict, Union
import numpy as np
import pandas as pd
from qcelemental.util.serialization import serialize
from ..interface.collections import HDF5View
class ViewHandler:
    """Serves HDF5 dataset views stored in a directory, caching open views.

    A view for collection ``N`` is expected at ``<path>/N.hdf5``.
    """
    def __init__(self, path: Union[str, pathlib.Path]) -> None:
        """
        Parameters
        ----------
        path: Union[str, Path]
            Directory containing dataset views
        """
        # Cache of opened views keyed by collection id, so each HDF5 file
        # is opened at most once per handler instance.
        self._view_cache: Dict[int, HDF5View] = {}
        self._path = pathlib.Path(path)
        if not self._path.is_dir():
            raise ValueError(f"Path in ViewHandler must be a directory, got: {self._path}")
    def view_path(self, collection_id: int) -> pathlib.Path:
        """
        Returns the path to a view corresponding to a collection identified by an id.
        Parameters
        ----------
        collection_id: int
            Collection id corresponding to view
        Returns
        -------
        pathlib.Path
            Path of requested view
        """
        return self._path / f"{collection_id}.hdf5"
    def view_exists(self, collection_id: int) -> bool:
        """
        Checks if view corresponding to a collection exists.
        Parameters
        ----------
        collection_id: int
            Collection id corresponding to view
        Returns
        -------
        bool
            Does the view exist?
        """
        return self.view_path(collection_id).is_file()
    def _get_view(self, collection_id: int) -> HDF5View:
        """Return the cached view for ``collection_id``, opening it on first use.

        Raises IOError when no view file exists for the collection; callers
        (``handle_request``) rely on this to report a missing view.
        """
        if collection_id not in self._view_cache:
            if not self.view_exists(collection_id):
                raise IOError
            self._view_cache[collection_id] = HDF5View(self.view_path(collection_id))
        return self._view_cache[collection_id]
    def handle_request(self, collection_id: int, request: str, model: Dict[str, Any]) -> Dict[str, Any]:
        """
        Handles REST requests related to views. This function implements the GET endpoint
        /collections/[collection_id]/view/[request]
        Parameters
        ----------
        collection_id: int
            Collection id corresponding to a view.
        request: str
            Requested data. Allowed options and corresponding DatasetView methods:
            - list: list_values
            - value: get_values
            - molecule: get_molecules
            - entry: get_entries
        model:
            REST model containing input options.
        Returns
        -------
        Dict[str, Any]:
            Dictionary corresponding to requested REST model
        """
        meta = {"errors": [], "success": False, "error_description": False, "msgpacked_cols": []}
        try:
            view = self._get_view(collection_id)
        except IOError:
            meta["success"] = False
            meta["error_description"] = f"View not available for collection #{collection_id}"
            return {"meta": meta, "data": None}
        # Dispatch on the request kind; each branch leaves a DataFrame in `df`.
        if request == "entry":
            try:
                df = view.get_entries(subset=model["subset"])
            except KeyError:
                meta["success"] = False
                meta["error_description"] = "Unable to find requested entry."
                return {"meta": meta, "data": None}
        elif request == "molecule":
            # keep_serialized=True: molecules stay as msgpack blobs for transport.
            series = view.get_molecules(model["indexes"], keep_serialized=True)
            df = pd.DataFrame({"molecule": series})
            df.reset_index(inplace=True)
            meta["msgpacked_cols"].append("molecule")
        elif request == "value":
            df, units = view.get_values(model["queries"], subset=model["subset"])
            df.reset_index(inplace=True)
        elif request == "list":
            df = view.list_values()
        else:
            meta["success"] = False
            meta["error_description"] = f"Unknown view request: {request}."
            return {"meta": meta, "data": None}
        # msgpack columns not supported by pyarrow
        # Detect columns whose first value is an ndarray or a list (sampled
        # from row 0 only) and msgpack-encode them so feather can store them.
        pack_columns = []
        for col in df.columns:
            if len(df) > 0:
                sample = df[col].iloc[0]
                if isinstance(sample, np.ndarray):
                    pack_columns.append(col)
                elif isinstance(sample, list):
                    pack_columns.append(col)
        # Add any other datatypes that need to be handled specially go here
        for col in pack_columns:
            df[col] = df[col].apply(lambda x: serialize(x, "msgpack-ext"))
        meta["msgpacked_cols"] += pack_columns
        # serialize
        # Ship the whole frame as an in-memory feather (Arrow IPC) payload.
        f = io.BytesIO()
        df.to_feather(f)
        df_feather = f.getvalue()
        if request == "value":
            data = {"values": df_feather, "units": units}
        else:
            data = df_feather
        meta["success"] = True
        return {"meta": meta, "data": data}
| 284 | 4,331 | 23 |
f30ab0f1c8eb1930ac3969b6e83e2d1a3c31f0a3 | 8,265 | py | Python | Demo/sockets/gopher.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/sockets/gopher.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/sockets/gopher.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | #! /usr/local/bin/python
# A simple gopher client.
#
# Usage: gopher [ [selector] host [port] ]
import string
import sys
import os
import socket
# Default selector, host and port
DEF_SELECTOR = ''
DEF_HOST = 'gopher.micro.umn.edu'
DEF_PORT = 70
# Recognized file types
T_TEXTFILE = '0'
T_MENU = '1'
T_CSO = '2'
T_ERROR = '3'
T_BINHEX = '4'
T_DOS = '5'
T_UUENCODE = '6'
T_SEARCH = '7'
T_TELNET = '8'
T_BINARY = '9'
T_REDUNDANT = '+'
T_SOUND = 's'
# Dictionary mapping types to strings
typename = {'0': '<TEXT>', '1': '<DIR>', '2': '<CSO>', '3': '<ERROR>', \
'4': '<BINHEX>', '5': '<DOS>', '6': '<UUENCODE>', '7': '<SEARCH>', \
'8': '<TELNET>', '9': '<BINARY>', '+': '<REDUNDANT>', 's': '<SOUND>'}
# Oft-used characters and strings
CRLF = '\r\n'
TAB = '\t'
# Open a TCP connection to a given host and port
# Send a selector to a given host and port, return a file with the reply
# Get a menu in the form of a list of entries
# Get a text file as a list of lines, with trailing CRLF stripped
# Get a text file and pass each line to a function, with trailing CRLF stripped
# Get a binary file as one solid data block
# Get a binary file and pass each block to a function
# A *very* simple interactive browser
# Browser main command, has default arguments
# Browse a menu
# Browse a text file
# Browse a search index
# "Browse" telnet-based information, i.e. open a telnet session
# "Browse" a binary file, i.e. save it to a file
# "Browse" a sound file, i.e. play it or save it
# Dictionary mapping types to browser functions
typebrowser = {'0': browse_textfile, '1': browse_menu, \
'4': browse_binary, '5': browse_binary, '6': browse_textfile, \
'7': browse_search, \
'8': browse_telnet, '9': browse_binary, 's': browse_sound}
# Class used to save lines, appending a newline to each line
# Class used to save data while showing progress
# Ask for and open a save file, or return None if not to save
# Test program
# Call the test program as a main program
test()
| 23.75 | 79 | 0.641379 | #! /usr/local/bin/python
# A simple gopher client.
#
# Usage: gopher [ [selector] host [port] ]
import string
import sys
import os
import socket
# Default selector, host and port
DEF_SELECTOR = ''
DEF_HOST = 'gopher.micro.umn.edu'
DEF_PORT = 70
# Recognized file types
T_TEXTFILE = '0'
T_MENU = '1'
T_CSO = '2'
T_ERROR = '3'
T_BINHEX = '4'
T_DOS = '5'
T_UUENCODE = '6'
T_SEARCH = '7'
T_TELNET = '8'
T_BINARY = '9'
T_REDUNDANT = '+'
T_SOUND = 's'
# Dictionary mapping types to strings
typename = {'0': '<TEXT>', '1': '<DIR>', '2': '<CSO>', '3': '<ERROR>', \
'4': '<BINHEX>', '5': '<DOS>', '6': '<UUENCODE>', '7': '<SEARCH>', \
'8': '<TELNET>', '9': '<BINARY>', '+': '<REDUNDANT>', 's': '<SOUND>'}
# Oft-used characters and strings
CRLF = '\r\n'
TAB = '\t'
# Open a TCP connection to a given host and port
def open_socket(host, port):
    # Accepts port as None/0 (falls back to DEF_PORT), a numeric string,
    # or an int. Python 2-era code: string.atoi, no timeouts.
    if not port:
        port = DEF_PORT
    elif type(port) == type(''):
        port = string.atoi(port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    return s
# Send a selector to a given host and port, return a file with the reply
def send_request(selector, host, port):
    s = open_socket(host, port)
    # NOTE(review): send() may transmit fewer bytes than requested;
    # sendall() would be more robust -- confirm before changing.
    s.send(selector + CRLF)
    # Half-close the write side to signal end-of-request to the server.
    s.shutdown(1)
    return s.makefile('r')
# Get a menu in the form of a list of entries
def get_menu(selector, host, port):
f = send_request(selector, host, port)
list = []
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if not line:
print '(Empty line from server)'
continue
typechar = line[0]
parts = string.splitfields(line[1:], TAB)
if len(parts) < 4:
print '(Bad line from server:', `line`, ')'
continue
if len(parts) > 4:
print '(Extra info from server:', parts[4:], ')'
parts.insert(0, typechar)
list.append(parts)
f.close()
return list
# Get a text file as a list of lines, with trailing CRLF stripped
def get_textfile(selector, host, port):
    # Collect the document via the callback variant; local renamed from
    # `list`, which shadowed the builtin.
    lines = []
    get_alt_textfile(selector, host, port, lines.append)
    return lines
# Get a text file and pass each line to a function, with trailing CRLF stripped
def get_alt_textfile(selector, host, port, func):
    # Stream a text document, calling func(line) for each line with the
    # trailing CRLF (or lone CR/LF) removed.
    f = send_request(selector, host, port)
    while 1:
        line = f.readline()
        if not line:
            print '(Unexpected EOF from server)'
            break
        if line[-2:] == CRLF:
            line = line[:-2]
        elif line[-1:] in CRLF:
            line = line[:-1]
        # A single '.' terminates the document.
        if line == '.':
            break
        # A leading '..' is the protocol's escape for a literal '.'.
        if line[:2] == '..':
            line = line[1:]
        func(line)
    f.close()
# Get a binary file as one solid data block
def get_binary(selector, host, port):
    # Read the entire reply into memory as one string.
    f = send_request(selector, host, port)
    data = f.read()
    f.close()
    return data
# Get a binary file and pass each block to a function
def get_alt_binary(selector, host, port, func, blocksize):
    # Stream the reply to func(data) in blocksize-sized chunks.
    # NOTE(review): the socket file object is never closed here, unlike
    # get_binary -- looks like an oversight; confirm before adding f.close().
    f = send_request(selector, host, port)
    while 1:
        data = f.read(blocksize)
        if not data:
            break
        func(data)
# A *very* simple interactive browser
# Browser main command, has default arguments
def browser(*args):
selector = DEF_SELECTOR
host = DEF_HOST
port = DEF_PORT
n = len(args)
if n > 0 and args[0]:
selector = args[0]
if n > 1 and args[1]:
host = args[1]
if n > 2 and args[2]:
port = args[2]
if n > 3:
raise RuntimeError, 'too many args'
try:
browse_menu(selector, host, port)
except socket.error, msg:
print 'Socket error:', msg
sys.exit(1)
except KeyboardInterrupt:
print '\n[Goodbye]'
# Browse a menu
def browse_menu(selector, host, port):
list = get_menu(selector, host, port)
while 1:
print '----- MENU -----'
print 'Selector:', `selector`
print 'Host:', host, ' Port:', port
print
for i in range(len(list)):
item = list[i]
typechar, description = item[0], item[1]
print string.rjust(`i+1`, 3) + ':', description,
if typename.has_key(typechar):
print typename[typechar]
else:
print '<TYPE=' + `typechar` + '>'
print
while 1:
try:
str = raw_input('Choice [CR == up a level]: ')
except EOFError:
print
return
if not str:
return
try:
choice = string.atoi(str)
except string.atoi_error:
print 'Choice must be a number; try again:'
continue
if not 0 < choice <= len(list):
print 'Choice out of range; try again:'
continue
break
item = list[choice-1]
typechar = item[0]
[i_selector, i_host, i_port] = item[2:5]
if typebrowser.has_key(typechar):
browserfunc = typebrowser[typechar]
try:
browserfunc(i_selector, i_host, i_port)
except (IOError, socket.error):
print '***', sys.exc_type, ':', sys.exc_value
else:
print 'Unsupported object type'
# Browse a text file
def browse_textfile(selector, host, port):
x = None
try:
p = os.popen('${PAGER-more}', 'w')
x = SaveLines(p)
get_alt_textfile(selector, host, port, x.writeln)
except IOError, msg:
print 'IOError:', msg
if x:
x.close()
f = open_savefile()
if not f:
return
x = SaveLines(f)
try:
get_alt_textfile(selector, host, port, x.writeln)
print 'Done.'
except IOError, msg:
print 'IOError:', msg
x.close()
# Browse a search index
def browse_search(selector, host, port):
while 1:
print '----- SEARCH -----'
print 'Selector:', `selector`
print 'Host:', host, ' Port:', port
print
try:
query = raw_input('Query [CR == up a level]: ')
except EOFError:
print
break
query = string.strip(query)
if not query:
break
if '\t' in query:
print 'Sorry, queries cannot contain tabs'
continue
browse_menu(selector + TAB + query, host, port)
# "Browse" telnet-based information, i.e. open a telnet session
def browse_telnet(selector, host, port):
if selector:
print 'Log in as', `selector`
if type(port) <> type(''):
port = `port`
sts = os.system('set -x; exec telnet ' + host + ' ' + port)
if sts:
print 'Exit status:', sts
# "Browse" a binary file, i.e. save it to a file
def browse_binary(selector, host, port):
f = open_savefile()
if not f:
return
x = SaveWithProgress(f)
get_alt_binary(selector, host, port, x.write, 8*1024)
x.close()
# "Browse" a sound file, i.e. play it or save it
def browse_sound(selector, host, port):
browse_binary(selector, host, port)
# Dictionary mapping types to browser functions
typebrowser = {'0': browse_textfile, '1': browse_menu, \
'4': browse_binary, '5': browse_binary, '6': browse_textfile, \
'7': browse_search, \
'8': browse_telnet, '9': browse_binary, 's': browse_sound}
# Class used to save lines, appending a newline to each line
class SaveLines:
    # Wraps a writable file object; writeln re-appends the newline that the
    # gopher fetchers stripped from each line.
    def __init__(self, f):
        self.f = f
    def writeln(self, line):
        self.f.write(line + '\n')
    def close(self):
        # For popen'ed pipes, close() returns the child's exit status.
        sts = self.f.close()
        if sts:
            print 'Exit status:', sts
# Class used to save data while showing progress
class SaveWithProgress:
    # Echoes one '#' per chunk written so the user sees download progress.
    def __init__(self, f):
        self.f = f
    def write(self, data):
        sys.stdout.write('#')
        sys.stdout.flush()
        self.f.write(data)
    def close(self):
        # Finish the '#' progress line, then close (pipe exit status check).
        print
        sts = self.f.close()
        if sts:
            print 'Exit status:', sts
# Ask for and open a save file, or return None if not to save
def open_savefile():
try:
savefile = raw_input( \
'Save as file [CR == don\'t save; |pipeline or ~user/... OK]: ')
except EOFError:
print
return None
savefile = string.strip(savefile)
if not savefile:
return None
if savefile[0] == '|':
cmd = string.strip(savefile[1:])
try:
p = os.popen(cmd, 'w')
except IOError, msg:
print `cmd`, ':', msg
return None
print 'Piping through', `cmd`, '...'
return p
if savefile[0] == '~':
savefile = os.path.expanduser(savefile)
try:
f = open(savefile, 'w')
except IOError, msg:
print `savefile`, ':', msg
return None
print 'Saving to', `savefile`, '...'
return f
# Test program
def test():
if sys.argv[4:]:
print 'usage: gopher [ [selector] host [port] ]'
sys.exit(2)
elif sys.argv[3:]:
browser(sys.argv[1], sys.argv[2], sys.argv[3])
elif sys.argv[2:]:
try:
port = string.atoi(sys.argv[2])
selector = ''
host = sys.argv[1]
except string.atoi_error:
selector = sys.argv[1]
host = sys.argv[2]
port = ''
browser(selector, host, port)
elif sys.argv[1:]:
browser('', sys.argv[1])
else:
browser()
# Call the test program as a main program
test()
| 5,695 | -3 | 534 |
c0d47a1eae91a5eef6747a5c4afebfeec0f733b0 | 1,727 | py | Python | BinarySearchTree/easy/isSymmetric.py | linminhtoo/algorithms | 884422a7c9f531e7ccaae03ba1ccbd6966b23dd3 | [
"MIT"
] | null | null | null | BinarySearchTree/easy/isSymmetric.py | linminhtoo/algorithms | 884422a7c9f531e7ccaae03ba1ccbd6966b23dd3 | [
"MIT"
] | null | null | null | BinarySearchTree/easy/isSymmetric.py | linminhtoo/algorithms | 884422a7c9f531e7ccaae03ba1ccbd6966b23dd3 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/symmetric-tree/solution/
# Definition for a binary tree node.
# recursive, by comparing two copies of the tree
from collections import deque
# level order traversal
# just need to handle Null nodes carefully | 31.981481 | 57 | 0.49392 | # https://leetcode.com/problems/symmetric-tree/solution/
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution_recursive:
# recursive, by comparing two copies of the tree
def isSymmetric(self, root: TreeNode) -> bool:
def helper(t1, t2):
# compare mirror image of tree against itself
if not t1 and not t2: return True
if not t1 or not t2: return False
return (
t1.val == t2.val and \
helper(t1.left, t2.right) and \
helper(t1.right, t2.left)
)
return helper(root, root)
from collections import deque
class Solution_iterative:
# level order traversal
# just need to handle Null nodes carefully
def isSymmetric(self, root: TreeNode) -> bool:
if not root:
return True
q = deque([root])
while q:
level_count = len(q)
level_nodes = []
while level_count:
node = q.popleft()
if node:
level_nodes.append(node.val)
if node.left:
q.append(node.left)
else:
q.append(None)
if node.right:
q.append(node.right)
else:
q.append(None)
else:
level_nodes.append(None)
level_count -= 1
if level_nodes != level_nodes[::-1]:
return False
return True | 1,328 | 2 | 145 |
1c6a2903e1d499c805f3066155cb0832c1308d7d | 817 | py | Python | steps/step18.py | choiking10/mytorch | 67140b608b14e2ec6ecca1638705af91d2d71b6b | [
"MIT"
] | null | null | null | steps/step18.py | choiking10/mytorch | 67140b608b14e2ec6ecca1638705af91d2d71b6b | [
"MIT"
] | null | null | null | steps/step18.py | choiking10/mytorch | 67140b608b14e2ec6ecca1638705af91d2d71b6b | [
"MIT"
] | null | null | null | import contextlib
import numpy as np
import mytorch
import mytorch.simple_core
from mytorch.simple_core import Variable, square, add
ex1()
ex2()
ex3()
| 17.382979 | 53 | 0.580171 | import contextlib
import numpy as np
import mytorch
import mytorch.simple_core
from mytorch.simple_core import Variable, square, add
def ex1():
x0 = Variable(np.array(2.0))
x1 = Variable(np.array(3.0))
t = add(x0, x1)
y = add(x0, t)
y.backward()
print(y.grad, t.grad)
print(x0.grad, x1.grad)
def ex2():
@contextlib.contextmanager
def config_test():
print('start') # 전처리
try:
yield
finally:
print('done')
with config_test():
print('process...')
def ex3():
with mytorch.simple_core.no_grad():
x = Variable(np.array(2.0))
y = square(x)
print("no_grad generation", y.creator)
x = Variable(np.array(2.0))
y = square(x)
print("grad generation", y.creator)
ex1()
ex2()
ex3()
| 596 | 0 | 69 |
7c909e58212423f8947dc24e1ea8efe3adf63239 | 9,174 | py | Python | nlpir/doc_extractor.py | yangyaofei/nlpir-python | af7bca9e2c3ba5f07316364da8f5e46a2bbc7c52 | [
"MIT"
] | 3 | 2020-10-13T08:42:26.000Z | 2020-10-23T05:29:05.000Z | nlpir/doc_extractor.py | yangyaofei/nlpir-python | af7bca9e2c3ba5f07316364da8f5e46a2bbc7c52 | [
"MIT"
] | null | null | null | nlpir/doc_extractor.py | yangyaofei/nlpir-python | af7bca9e2c3ba5f07316364da8f5e46a2bbc7c52 | [
"MIT"
] | null | null | null | #! coding=utf-8
"""
high-level toolbox for Document Extractor
"""
import re
import typing
import nlpir
from nlpir import get_instance as __get_instance__
from nlpir import native
# class and class instance
__cls__ = native.doc_extractor.DocExtractor
__instance__: typing.Optional[native.doc_extractor.DocExtractor] = None
# Location of DLL
__lib__ = None
# Data directory
__data__ = None
# license_code
__license_code__ = None
# encode
__nlpir_encode__ = native.UTF8_CODE
class ExtractResult:
"""
A class for retrieve result from Document Extractor's handle
"""
#: Types map can be retrieved from DocExtractor
retrieve_type_map: typing.Dict[str, int] = {
"person": native.doc_extractor.DOC_EXTRACT_TYPE_PERSON,
"location": native.doc_extractor.DOC_EXTRACT_TYPE_LOCATION,
"organization": native.doc_extractor.DOC_EXTRACT_TYPE_ORGANIZATION,
"keyword": native.doc_extractor.DOC_EXTRACT_TYPE_KEYWORD,
"author": native.doc_extractor.DOC_EXTRACT_TYPE_AUTHOR,
"media": native.doc_extractor.DOC_EXTRACT_TYPE_MEDIA,
"country": native.doc_extractor.DOC_EXTRACT_TYPE_COUNTRY,
"province": native.doc_extractor.DOC_EXTRACT_TYPE_PROVINCE,
"abstract": native.doc_extractor.DOC_EXTRACT_TYPE_ABSTRACT,
"positive": native.doc_extractor.DOC_EXTRACT_TYPE_POSITIVE,
"negative": native.doc_extractor.DOC_EXTRACT_TYPE_NEGATIVE,
"text": native.doc_extractor.DOC_EXTRACT_TYPE_TEXT,
"time": native.doc_extractor.DOC_EXTRACT_TYPE_TIME,
"user": native.doc_extractor.DOC_EXTRACT_TYPE_USER
}
def get_available_retrieve_types(self) -> typing.Dict[str, int]:
"""
Get a set of types_name and types available for current extraction result
:return:
"""
return {**self.retrieve_type_map, **self.user_retrieve_type_map}
def set_retrieve_types(self, retrieve_type_list: typing.List[int]) -> bool:
"""
Set what type of data want to get from :func:`get_result` , can be set multi-times
:param retrieve_type_list: list of retrieve types
:return:
"""
self.retrieve_types = retrieve_type_list
return True
@__get_instance__
def get_result(
self,
retrieve_types: typing.Optional[typing.List[int]] = None
) -> typing.Dict[str, typing.List[typing.Dict[str, typing.Union[str, int, float]]]]:
"""
Get result from current result, can be retrieved multi-times.
:param retrieve_types: option, a list of retrieve types want to get,
default is all types can be retrieved or certain types set by :func:`set_retrieve_types`
:return: a dict of result : ``{type_name: [result}]}`` , example
::
{
"person": [
{
"word": "卢梭",
"pos": "n",
"weight": 1.5,
"freq": 100
}
]
}
"""
if retrieve_types is not None:
self.set_retrieve_types(retrieve_types)
result_dict = dict()
for retrieve_type in self.retrieve_types:
result = __instance__.get_result(
handle=self.handle, doc_extract_type=retrieve_type
)
re_, func = self.re_result_map.get(retrieve_type, self.re_sharp_split)
result = re_.findall("" if result is None else result)
result_list = list()
for string_tuple in result:
result_map = func(string_tuple)
result_list.append({
"word": result_map.get(0, None),
"pos": result_map.get(1, None),
"weight": float(result_map.get(2, None)) if result_map.get(2, None) is not None else None,
"freq": int(result_map.get(3, None)) if result_map.get(3, None) is not None else None
})
result_dict[self.__retrieve_type_reverse_map[retrieve_type]] = result_list
return result_dict
@__get_instance__
def get_sentiment_result(self) -> int:
"""
Get sentiment point from current extraction result
:return:
"""
return __instance__.get_sentiment_score(self.handle)
@__get_instance__
@__get_instance__
def get_native_instance() -> native.doc_extractor.DocExtractor:
"""
返回原生NLPIR接口,使用更多函数
:return: The singleton instance
"""
return __instance__
@__get_instance__
def extract(text: str, user_define_pos: typing.List[str]) -> ExtractResult:
"""
:param text:
:param user_define_pos:
:return:
"""
handle = __instance__.pares_doc_e(text, "#".join(user_define_pos))
return ExtractResult(handle=handle, user_retrieve_type=user_define_pos)
@__get_instance__
def import_dict(word_list: list) -> list:
"""
See :func:`nlpir.import_dict`
:param word_list: list of words want to add to NLPIR
:return: the word fail to add to the NLPIR
"""
return nlpir.import_dict(word_list=word_list, instance=__instance__)
@__get_instance__
def clean_user_dict() -> bool:
"""
See :func:`nlpir.clean_user_dict`
:return: success or not
"""
return nlpir.clean_user_dict(instance=__instance__)
@__get_instance__
def delete_user_word(word_list: list):
"""
See :func:`nlpir.delete_user_word`
:param word_list: list of words want to delete
"""
return nlpir.delete_user_word(word_list=word_list, instance=__instance__)
@__get_instance__
def save_user_dict() -> bool:
"""
See :func:`nlpir.save_user_dict`
:return: Success or not
"""
return nlpir.save_user_dict(instance=__instance__)
@__get_instance__
def clean_saved_user_dict():
"""
See :func:`nlpir.clean_saved_user_dict`
:return: Delete success or not
"""
return nlpir.clean_saved_user_dict()
@__get_instance__
def import_blacklist(filename: str, pos_blacklist=typing.List[str]) -> bool:
"""
Import Blacklist to system, see :func:`nlpir.import_blacklist`
"""
return nlpir.import_blacklist(__instance__, filename, pos_blacklist)
@__get_instance__
def clean_blacklist() -> bool:
"""
清除黑名单词表, see :func:`nlpir.clean_blacklist`
:return: clean success or not
"""
return nlpir.clean_blacklist()
@__get_instance__
def recover_blacklist() -> bool:
"""
恢复黑名单词表,仅在被重命名的词表存在时才起作用, see :func:`nlpir.recover_blacklist`
:return:
"""
return nlpir.recover_blacklist()
| 34.359551 | 118 | 0.653259 | #! coding=utf-8
"""
high-level toolbox for Document Extractor
"""
import re
import typing
import nlpir
from nlpir import get_instance as __get_instance__
from nlpir import native
# class and class instance
__cls__ = native.doc_extractor.DocExtractor
__instance__: typing.Optional[native.doc_extractor.DocExtractor] = None
# Location of DLL
__lib__ = None
# Data directory
__data__ = None
# license_code
__license_code__ = None
# encode
__nlpir_encode__ = native.UTF8_CODE
class ExtractResult:
"""
A class for retrieve result from Document Extractor's handle
"""
#: Types map can be retrieved from DocExtractor
retrieve_type_map: typing.Dict[str, int] = {
"person": native.doc_extractor.DOC_EXTRACT_TYPE_PERSON,
"location": native.doc_extractor.DOC_EXTRACT_TYPE_LOCATION,
"organization": native.doc_extractor.DOC_EXTRACT_TYPE_ORGANIZATION,
"keyword": native.doc_extractor.DOC_EXTRACT_TYPE_KEYWORD,
"author": native.doc_extractor.DOC_EXTRACT_TYPE_AUTHOR,
"media": native.doc_extractor.DOC_EXTRACT_TYPE_MEDIA,
"country": native.doc_extractor.DOC_EXTRACT_TYPE_COUNTRY,
"province": native.doc_extractor.DOC_EXTRACT_TYPE_PROVINCE,
"abstract": native.doc_extractor.DOC_EXTRACT_TYPE_ABSTRACT,
"positive": native.doc_extractor.DOC_EXTRACT_TYPE_POSITIVE,
"negative": native.doc_extractor.DOC_EXTRACT_TYPE_NEGATIVE,
"text": native.doc_extractor.DOC_EXTRACT_TYPE_TEXT,
"time": native.doc_extractor.DOC_EXTRACT_TYPE_TIME,
"user": native.doc_extractor.DOC_EXTRACT_TYPE_USER
}
def __init__(self, handle: int, user_retrieve_type: typing.List[str]):
self.handle: int = handle
# add user defined pos
self.user_retrieve_type_map: typing.Dict[str, int] = {
_: self.retrieve_type_map["user"] + i for i, _ in user_retrieve_type
}
self.retrieve_types: typing.List[int] = [
native.doc_extractor.DOC_EXTRACT_TYPE_PERSON,
native.doc_extractor.DOC_EXTRACT_TYPE_LOCATION,
native.doc_extractor.DOC_EXTRACT_TYPE_ORGANIZATION,
native.doc_extractor.DOC_EXTRACT_TYPE_KEYWORD,
native.doc_extractor.DOC_EXTRACT_TYPE_AUTHOR,
native.doc_extractor.DOC_EXTRACT_TYPE_MEDIA,
native.doc_extractor.DOC_EXTRACT_TYPE_COUNTRY,
native.doc_extractor.DOC_EXTRACT_TYPE_PROVINCE,
native.doc_extractor.DOC_EXTRACT_TYPE_ABSTRACT,
native.doc_extractor.DOC_EXTRACT_TYPE_POSITIVE,
native.doc_extractor.DOC_EXTRACT_TYPE_NEGATIVE,
native.doc_extractor.DOC_EXTRACT_TYPE_TEXT,
native.doc_extractor.DOC_EXTRACT_TYPE_TIME,
]
self.retrieve_types += self.user_retrieve_type_map.values()
self.all_available_type_map = self.get_available_retrieve_types()
self.__retrieve_type_reverse_map = {
self.all_available_type_map[retrieve_type]: retrieve_type for retrieve_type in self.all_available_type_map
}
self.re_sharp_split = re.compile(r"([^#]+)#"), lambda _: {0: _}
self.re_key_words = re.compile(r"([^/^#]+)/([^/^#]+)/([.\d]+?)/(\d+)?#"), lambda _: {0: _[0], 1: _[1],
2: _[2], 3: _[3]}
self.re_weight = re.compile(r"(.+?)/(\d+)#"), lambda _: {0: _[0], 2: _[1]}
self.re_none = re.compile("(.+)"), lambda _: {0: _}
self.re_result_map = {
native.doc_extractor.DOC_EXTRACT_TYPE_KEYWORD: self.re_key_words,
native.doc_extractor.DOC_EXTRACT_TYPE_AUTHOR: self.re_none,
native.doc_extractor.DOC_EXTRACT_TYPE_MEDIA: self.re_none,
native.doc_extractor.DOC_EXTRACT_TYPE_ABSTRACT: self.re_none,
native.doc_extractor.DOC_EXTRACT_TYPE_POSITIVE: self.re_weight,
native.doc_extractor.DOC_EXTRACT_TYPE_NEGATIVE: self.re_weight,
native.doc_extractor.DOC_EXTRACT_TYPE_TEXT: self.re_none,
native.doc_extractor.DOC_EXTRACT_TYPE_TIME: self.re_sharp_split,
}
def get_available_retrieve_types(self) -> typing.Dict[str, int]:
"""
Get a set of types_name and types available for current extraction result
:return:
"""
return {**self.retrieve_type_map, **self.user_retrieve_type_map}
def set_retrieve_types(self, retrieve_type_list: typing.List[int]) -> bool:
"""
Set what type of data want to get from :func:`get_result` , can be set multi-times
:param retrieve_type_list: list of retrieve types
:return:
"""
self.retrieve_types = retrieve_type_list
return True
@__get_instance__
def get_result(
self,
retrieve_types: typing.Optional[typing.List[int]] = None
) -> typing.Dict[str, typing.List[typing.Dict[str, typing.Union[str, int, float]]]]:
"""
Get result from current result, can be retrieved multi-times.
:param retrieve_types: option, a list of retrieve types want to get,
default is all types can be retrieved or certain types set by :func:`set_retrieve_types`
:return: a dict of result : ``{type_name: [result}]}`` , example
::
{
"person": [
{
"word": "卢梭",
"pos": "n",
"weight": 1.5,
"freq": 100
}
]
}
"""
if retrieve_types is not None:
self.set_retrieve_types(retrieve_types)
result_dict = dict()
for retrieve_type in self.retrieve_types:
result = __instance__.get_result(
handle=self.handle, doc_extract_type=retrieve_type
)
re_, func = self.re_result_map.get(retrieve_type, self.re_sharp_split)
result = re_.findall("" if result is None else result)
result_list = list()
for string_tuple in result:
result_map = func(string_tuple)
result_list.append({
"word": result_map.get(0, None),
"pos": result_map.get(1, None),
"weight": float(result_map.get(2, None)) if result_map.get(2, None) is not None else None,
"freq": int(result_map.get(3, None)) if result_map.get(3, None) is not None else None
})
result_dict[self.__retrieve_type_reverse_map[retrieve_type]] = result_list
return result_dict
@__get_instance__
def get_sentiment_result(self) -> int:
"""
Get sentiment point from current extraction result
:return:
"""
return __instance__.get_sentiment_score(self.handle)
@__get_instance__
def __del__(self):
return __instance__.release_handle(self.handle)
@__get_instance__
def get_native_instance() -> native.doc_extractor.DocExtractor:
"""
返回原生NLPIR接口,使用更多函数
:return: The singleton instance
"""
return __instance__
@__get_instance__
def extract(text: str, user_define_pos: typing.List[str]) -> ExtractResult:
"""
:param text:
:param user_define_pos:
:return:
"""
handle = __instance__.pares_doc_e(text, "#".join(user_define_pos))
return ExtractResult(handle=handle, user_retrieve_type=user_define_pos)
@__get_instance__
def import_dict(word_list: list) -> list:
"""
See :func:`nlpir.import_dict`
:param word_list: list of words want to add to NLPIR
:return: the word fail to add to the NLPIR
"""
return nlpir.import_dict(word_list=word_list, instance=__instance__)
@__get_instance__
def clean_user_dict() -> bool:
"""
See :func:`nlpir.clean_user_dict`
:return: success or not
"""
return nlpir.clean_user_dict(instance=__instance__)
@__get_instance__
def delete_user_word(word_list: list):
"""
See :func:`nlpir.delete_user_word`
:param word_list: list of words want to delete
"""
return nlpir.delete_user_word(word_list=word_list, instance=__instance__)
@__get_instance__
def save_user_dict() -> bool:
"""
See :func:`nlpir.save_user_dict`
:return: Success or not
"""
return nlpir.save_user_dict(instance=__instance__)
@__get_instance__
def clean_saved_user_dict():
"""
See :func:`nlpir.clean_saved_user_dict`
:return: Delete success or not
"""
return nlpir.clean_saved_user_dict()
@__get_instance__
def import_blacklist(filename: str, pos_blacklist=typing.List[str]) -> bool:
"""
Import Blacklist to system, see :func:`nlpir.import_blacklist`
"""
return nlpir.import_blacklist(__instance__, filename, pos_blacklist)
@__get_instance__
def clean_blacklist() -> bool:
"""
清除黑名单词表, see :func:`nlpir.clean_blacklist`
:return: clean success or not
"""
return nlpir.clean_blacklist()
@__get_instance__
def recover_blacklist() -> bool:
"""
恢复黑名单词表,仅在被重命名的词表存在时才起作用, see :func:`nlpir.recover_blacklist`
:return:
"""
return nlpir.recover_blacklist()
| 2,538 | 0 | 53 |
56e0389354e0755a6dd537bd014b8b73b969c4c2 | 2,113 | py | Python | scripts/javierre2016_format.py | thehyve/genetics-v2g-data | 96544d075955282ab0f8bef287a11bb782942cf7 | [
"Apache-2.0"
] | 7 | 2019-01-30T01:08:15.000Z | 2022-03-07T20:43:53.000Z | scripts/javierre2016_format.py | thehyve/genetics-v2g-data | 96544d075955282ab0f8bef287a11bb782942cf7 | [
"Apache-2.0"
] | null | null | null | scripts/javierre2016_format.py | thehyve/genetics-v2g-data | 96544d075955282ab0f8bef287a11bb782942cf7 | [
"Apache-2.0"
] | 5 | 2019-07-11T09:37:58.000Z | 2021-07-30T23:05:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Ed Mountjoy (June 2018)
Formats the output of bedtools into final format for loading
'''
import argparse
import pandas as pd
def parse_args():
''' Load command line args '''
parser = argparse.ArgumentParser()
parser.add_argument('--inf', metavar='<file>', help=('Input file'), type=str, required=True)
parser.add_argument('--outf', metavar='<file>', help=('Output file'), type=str, required=True)
parser.add_argument('--cell_name', metavar='<str>', help=('Name of cell type'), type=str, required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
| 31.537313 | 115 | 0.630383 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Ed Mountjoy (June 2018)
Formats the output of bedtools into final format for loading
'''
import argparse
import pandas as pd
def main():
# Parse args
args = parse_args()
# Load
df = pd.read_csv(args.inf, sep='\t', header=None, low_memory=False)
total_lines = df.shape[0]
# Remove interactions between different chromosomes aka. "trans"
cis = (df.iloc[:,3] == df.iloc[:,9])
print("CIS interactions: \t\t{} ({:.2f} %)".format(sum(cis),(sum(cis)*100)/total_lines))
# Remove interactions where the midpoint of the interval is too far away
# from the gene TSS
df['distance'] = abs((df.iloc[:,4] + df.iloc[:,5])/2 - df.iloc[:,10])
twosd_thres = 2.45e6
near = (df['distance'] < twosd_thres)
print("Interactions < {:.2E} bases: \t{} ({:.2f} %)".format(twosd_thres,sum(near),(sum(near)*100)/total_lines))
# Apply filters and extract required columns
filtered_df = df.loc[cis & near]
out_df = filtered_df.iloc[:, [3, 4, 5, 12, 6]].copy()
out_df.columns = ['chrom', 'start', 'end', 'ensembl_id', 'score']
# Add 1 to the start to make everything 1-indexed
out_df.start = out_df.start + 1
# Sort
out_df.chrom = out_df.chrom.astype(str)
out_df = out_df.sort_values(['chrom', 'start', 'end', 'score'])
# Remove duplicates
out_df = out_df.drop_duplicates(
subset=['chrom', 'start', 'end', 'ensembl_id'],
keep='last')
# Write tsv
out_df.loc[:, 'cell_type'] = args.cell_name
out_df.to_csv(args.outf, sep='\t', index=None, compression='gzip')
return 0
def parse_args():
''' Load command line args '''
parser = argparse.ArgumentParser()
parser.add_argument('--inf', metavar='<file>', help=('Input file'), type=str, required=True)
parser.add_argument('--outf', metavar='<file>', help=('Output file'), type=str, required=True)
parser.add_argument('--cell_name', metavar='<str>', help=('Name of cell type'), type=str, required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
| 1,428 | 0 | 23 |
6998666d9e5790f10ece97c00afb21201017f0e7 | 3,643 | py | Python | data_loader/sampling.py | ne-bo/protein | eae8e73b1c5c4b6d886fc3cc362b7568b0914796 | [
"MIT"
] | 1 | 2020-01-07T14:52:04.000Z | 2020-01-07T14:52:04.000Z | data_loader/sampling.py | ne-bo/protein | eae8e73b1c5c4b6d886fc3cc362b7568b0914796 | [
"MIT"
] | null | null | null | data_loader/sampling.py | ne-bo/protein | eae8e73b1c5c4b6d886fc3cc362b7568b0914796 | [
"MIT"
] | null | null | null | import torch
import numpy as np
from torch.utils.data.sampler import Sampler
class UniformSampler(Sampler):
"""Samples elements with roughly uniform distribution of samples with the same label
Arguments:
"""
def __init__(self, data_source,
batch_size,
number_of_different_classes_in_batch,
batches_number):
"""
:param data_source: dataset, should be an inheritor of Dataset
:param batch_size: desired batch size, int
:param number_of_different_classes_in_batch: desired number of different classes in batch,usually 2 or 3
:param batches_number: how many batches you want to create
"""
super().__init__(data_source)
self.data_source = data_source
self.labels = self.data_source.labels
self.length = len(self.labels) # how many samples we have in our dataset
self.number_of_samples_with_the_same_label_in_the_batch = batch_size // number_of_different_classes_in_batch
self.number_of_different_classes_in_batch = number_of_different_classes_in_batch
self.batches_number = batches_number
| 45.5375 | 132 | 0.683777 | import torch
import numpy as np
from torch.utils.data.sampler import Sampler
class UniformSampler(Sampler):
"""Samples elements with roughly uniform distribution of samples with the same label
Arguments:
"""
def __init__(self, data_source,
batch_size,
number_of_different_classes_in_batch,
batches_number):
"""
:param data_source: dataset, should be an inheritor of Dataset
:param batch_size: desired batch size, int
:param number_of_different_classes_in_batch: desired number of different classes in batch,usually 2 or 3
:param batches_number: how many batches you want to create
"""
super().__init__(data_source)
self.data_source = data_source
self.labels = self.data_source.labels
self.length = len(self.labels) # how many samples we have in our dataset
self.number_of_samples_with_the_same_label_in_the_batch = batch_size // number_of_different_classes_in_batch
self.number_of_different_classes_in_batch = number_of_different_classes_in_batch
self.batches_number = batches_number
def draw_samples_with_label(self, label):
labels_array = np.array(self.labels)
# print('labels_array ', labels_array)
# selected_samples = np.where(labels_array.any() == label)[0]
selected_samples = []
for i, labels_list in enumerate(labels_array):
# print('label', label, 'labels_list ', labels_list)
if label in labels_list:
selected_samples.append(i)
selected_samples = np.array(selected_samples)
# print('selected_samples ', selected_samples, len(selected_samples))
# print('self.number_of_samples_with_the_same_label_in_the_batch ', self.number_of_samples_with_the_same_label_in_the_batch)
# in case we have too few sample with this label we just duplicate examples
total_selected_samples = selected_samples
while total_selected_samples.shape[0] < self.number_of_samples_with_the_same_label_in_the_batch:
total_selected_samples = np.hstack((total_selected_samples, selected_samples))
# shuffle
samples = np.random.permutation(total_selected_samples)
# take the requested number of samples
samples = samples[:self.number_of_samples_with_the_same_label_in_the_batch]
#print('samples ', samples)
return samples
def get_new_batch(self):
batch = np.array([], dtype=int)
labels_already_in_batch = []
for class_number in range(self.number_of_different_classes_in_batch):
label = np.random.choice(np.array(np.random.choice(np.array(self.labels))))
# print('our random label is ', label)
#print('labels_already_in_batch ', labels_already_in_batch)
while label in labels_already_in_batch:
label = np.random.choice(np.array(np.random.choice(np.array(self.labels))))
labels_already_in_batch.append(label)
batch = np.hstack((batch, self.draw_samples_with_label(label)))
#print('batch ', batch)
return batch
def __iter__(self):
batches = np.array([], dtype=int)
for batch_number in range(self.batches_number):
new_batch = self.get_new_batch()
batches = np.hstack((batches, new_batch))
return iter(np.array(batches))
def __len__(self):
return self.batches_number * \
self.number_of_different_classes_in_batch * \
self.number_of_samples_with_the_same_label_in_the_batch
| 2,374 | 0 | 108 |
3de391af7f43ef5085f07c3952111098eb27c56b | 23,701 | py | Python | src/nlpia/book_parser.py | AAAI-DISIM-UnivAQ/nlpia | 39bf2ceff2128e5bb7ad233ced55cb55fe70be4a | [
"MIT"
] | 532 | 2016-11-30T03:51:13.000Z | 2022-03-29T13:40:43.000Z | src/nlpia/book_parser.py | AAAI-DISIM-UnivAQ/nlpia | 39bf2ceff2128e5bb7ad233ced55cb55fe70be4a | [
"MIT"
] | 30 | 2017-12-12T12:18:41.000Z | 2022-03-23T14:44:45.000Z | src/nlpia/book_parser.py | AAAI-DISIM-UnivAQ/nlpia | 39bf2ceff2128e5bb7ad233ced55cb55fe70be4a | [
"MIT"
] | 241 | 2017-06-20T11:51:31.000Z | 2022-03-28T09:42:38.000Z | """ Functions for reading ascidoc files (*.adoc, *.asc, *.asciidoc) and tagging each line """
import os
import sys
import glob
import re
import logging
from shutil import copyfile
from pugnlp import futil
from nlpia.regexes import CRE_ACRONYM
# from nlpia.data_utils import iter_lines # FIXME: reuse
from nlpia.constants import BOOK_PATH
from nlpia.regexes import RE_URL_SIMPLE, splitext
from nlpia.loaders import get_url_title, get_url_filemeta
from nlpia.transcoders import delimit_slug
from nlpia.translators import HyperlinkStyleCorrector
from nlpia.futil import rm_rf, rm_r # noqa (used in doctests to clean up)
log = logging.getLogger(__name__)
# FIXME: redundant definitions here from develop branch
BLOCK_DELIMITERS = dict([('--', 'natural'), ('==', 'natural'), ('__', 'natural'), ('**', 'natural'),
('++', 'latex'), ('////', 'comment')])
BLOCK_DELIM_CHRS = dict([(k[0], v) for k, v in BLOCK_DELIMITERS.items()])
BLOCK_DELIM_REGEXES = dict([(r'^[' + s[0] + r']{' + str(len(s)) + r',160}$', tag) for (s, tag) in BLOCK_DELIMITERS.items()])
BLOCK_HEADERS = dict([('[tip]', 'natural'), ('[note]', 'natural'), ('[important]', 'natural'), ('[quote]', 'natural')])
CRE_BLOCK_DELIMITER = re.compile('|'.join([s for s, tag in BLOCK_DELIM_REGEXES.items()]))
HEADER_TYPES = [('source', 'code'), ('latex', 'latex')]
# Working definitions from master branch
BLOCK_DELIMITERS = dict([('--', 'code'), ('==', 'natural_sidenote'), ('__', 'natural_quote'), ('**', 'natural_asside'),
('++', 'latexmath'), ('//', 'comment')])
BLOCK_DELIMITER_CHRS = ''.join([k[0] for k in BLOCK_DELIMITERS.keys()])
BLOCK_HEADERS = dict([('[tip]', 'natural_tip'), ('[note]', 'natural_note'),
('[important]', 'natural_important'), ('[quote]', 'natural_quote')])
BLOCK_HEADERS4 = dict([(k[:4], v) for k, v in BLOCK_HEADERS.items()])
CRE_BLOCK_DELIMITER = re.compile(r'^[' + BLOCK_DELIMITER_CHRS + r']{2,50}$')
CRE_ANNOTATION = re.compile(r'^<([0-9]{1,2})>.*')
HEADER_TYPES = [('source', 'code'), ('latex', 'latex'), ('latexmath', 'latex'),
('template="glossary"', 'natural_glossary'), ("template='glossary'", 'natural_glossary')]
VALID_TAGS = set(['anchor', 'attribute', 'blank_line', 'block_header', 'caption', 'code', 'code_end', 'code_start', ] +
[b for b in BLOCK_DELIMITERS.values()] +
[b + '_start' for b in BLOCK_DELIMITERS.values()] +
[b + '_end' for b in BLOCK_DELIMITERS.values()] +
['natural_heading{}'.format(i) for i in range(1, 6)] +
['image_link', 'natural', 'natural_end', 'natural_start', 'code_header'])
INCLUDE_TAGS = set(['natural', 'caption'] + ['natural_heading{}'.format(i) for i in range(1, 6)])
re_bad_footnotes = re.compile(r'footnote:\[' + RE_URL_SIMPLE + r'\]')
def get_lines(file_path=BOOK_PATH):
r""" Retrieve text lines from the manuscript Chapter*.asc and Appendix*.asc files
Args:
file_path (str): Path to directory containing manuscript asciidoc files
i.e.: /Users/cole-home/repos/nlpinaction/manuscript/ or nlpia.constants.BOOK_PATH
Returns:
list of lists of str, one list for each Chapter or Appendix
>>> lines = get_lines(BOOK_PATH)
>>> next(lines)
('.../src/nlpia/data/book/Appendix F -- Glossary.adoc',
['= Glossary\n',
'\n',
"We've collected some ...])
"""
if os.path.isdir(file_path):
file_path = os.path.join(file_path, '*.adoc')
files = glob.glob(file_path)
elif os.path.isfile(file_path):
files = [file_path]
elif '*' in file_path:
if os.path.sep not in file_path:
file_path = os.path.join(os.path.abspath(os.path.curdir), file_path)
files = glob.glob(file_path)
else:
raise FileNotFoundError("Unable to find the directory or files requested.")
lines = []
for filepath in files:
with open(filepath, 'r') as f:
lines.append(f.readlines())
return zip(files, lines)
def get_acronyms(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript')):
""" Find all the 2 and 3-letter acronyms in the manuscript and return as a sorted list of tuples """
acronyms = []
for f, lines in get_lines(manuscript):
for line in lines:
matches = CRE_ACRONYM.finditer(line)
if matches:
for m in matches:
if m.group('a2'):
acronyms.append((m.group('a2'), m.group('s2')))
elif m.group('a3'):
acronyms.append((m.group('a3'), m.group('s3')))
elif m.group('a4'):
acronyms.append((m.group('a4'), m.group('s4')))
elif m.group('a5'):
acronyms.append((m.group('a5'), m.group('s5')))
return sorted(dict(acronyms).items())
def write_glossary(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript'), linesep=None):
""" Compose an asciidoc string with acronyms culled from the manuscript """
linesep = linesep or os.linesep
lines = ['[acronyms]', '== Acronyms', '', '[acronyms,template="glossary",id="terms"]']
acronyms = get_acronyms(manuscript)
for a in acronyms:
lines.append('*{}*:: {} -- '.format(a[0], a[1][0].upper() + a[1][1:]))
return linesep.join(lines)
def tag_lines(lines, include_tags=None):
r""" Naively tags lines from manuscript with: code, natural, heading, etc.
Returns:
list of tuples [(tag, line), ...]
>>> ' '.join(sorted(VALID_TAGS))
'anchor attribute blank_line block_header caption code code_end code_header code_start
comment comment_end comment_start image_link latexmath latexmath_end latexmath_start
natural natural_asside natural_asside_end natural_asside_start natural_end
natural_heading1 natural_heading2 natural_heading3 natural_heading4 natural_heading5
natural_quote natural_quote_end natural_quote_start
natural_sidenote natural_sidenote_end natural_sidenote_start natural_start'
>>> list(tag_lines('|= Title| :chapter: 0|Hello|cruel world|==Heading Level 2| \t| [source,bash]|====|$ grep this|====|'\
... .split('|')))
[('blank_line', ''), ('natural_heading1', '= Title'), ('attribute', ' :chapter: 0'),
('natural', 'Hello'), ('natural', 'cruel world'), ('natural_heading2', '==Heading Level 2'),
('blank_line', ' \t'), ('code_header', ' [source,bash]'),
('code_start', '===='), ('code', '$ grep this'), ('code_end', '===='), ('blank_line', '')]
"""
current_block_type = None
block_terminator = None
tag = ''
tagged_lines = []
for idx, line in enumerate(lines):
normalized_line = line.lower().strip().replace(" ", "")
# [source,...] with or without any following "----" block delimiter
# TODO: make this a regex that classifies among the different types (source, glossary, tip, etc)
header_type = next((HEADER_TYPES[i] for i in range(len(HEADER_TYPES)) if
normalized_line.startswith('[') and normalized_line[1:].startswith(HEADER_TYPES[i][0])),
None)
if header_type:
current_block_type = header_type[1]
tag = current_block_type + '_header'
block_terminator = None
elif normalized_line[:4] in BLOCK_HEADERS4:
current_block_type = BLOCK_HEADERS4[normalized_line[:4]]
tag = current_block_type + '_header' # BLOCK_HEADERS[normalized_line]
block_terminator = None
elif (
CRE_BLOCK_DELIMITER.match(normalized_line) and
normalized_line[:2] in BLOCK_DELIMITERS): # or (tag in set('caption anchor'.split()))):
if (not idx or not current_block_type or not block_terminator):
current_block_type = (current_block_type or BLOCK_DELIMITERS[normalized_line[:2]])
tag = current_block_type + '_start'
block_terminator = normalized_line
elif block_terminator and line.rstrip() == block_terminator:
tag = current_block_type + '_end'
current_block_type = None
block_terminator = None
else:
tag = current_block_type
elif current_block_type and (line.rstrip() == block_terminator or
(not block_terminator and not normalized_line)):
tag = current_block_type + '_end'
current_block_type = None
block_terminator = None
elif current_block_type:
tag = current_block_type
elif not normalized_line:
tag = 'blank_line'
elif normalized_line.startswith(r'//'):
tag = 'comment'
elif normalized_line.startswith(r':'):
tag = 'attribute'
elif normalized_line.startswith('='):
tag = 'natural_heading'
tag += str(len([c for c in normalized_line[:6].split()[0] if c == '=']))
elif normalized_line.startswith('.'):
tag = 'caption'
elif normalized_line.startswith('image:'):
tag = 'image_link'
elif normalized_line.startswith('[['):
tag = 'anchor'
else:
tag = 'natural'
current_block_type = None
tagged_lines.append((tag, line))
return filter_tagged_lines(tagged_lines, include_tags=include_tags)
def get_tagged_sections(book_dir=BOOK_PATH, include_tags=None):
    """ Return a list of (adoc_file_path, tagged_line_generator) pairs for every file in book_dir

    >>> get_tagged_sections()
    [('...src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)]
    """
    sections = []
    for filepath, raw_lines in get_lines(book_dir):
        sections.append((filepath, tag_lines(raw_lines, include_tags=include_tags)))
    return sections
def find_bad_footnote_urls(tagged_lines, include_tags=None):
    """ Find lines in the list of 2-tuples of adoc-tagged lines that contain bad footnotes (only urls)

    >>> sections = get_tagged_sections(BOOK_PATH)
    >>> tagged_lines = list(sections[0][1])
    >>> find_bad_footnote_urls(tagged_lines)
    [[30, 'https://spacy.io/usage/linguistic-features#rule-based-morphology']]
    """
    log.debug(tagged_lines[:2])
    bad_lines = []
    for lineno, (tag, line) in enumerate(tagged_lines):
        # a line is examined when no tag filter applies, or its tag matches a filter prefix
        wanted = (tag is None or include_tags is None or tag in include_tags or
                  any(tag.startswith(prefix) for prefix in include_tags))
        if not wanted:
            continue
        # first element is the original line; any further elements are bare footnote URLs
        baddies = get_line_bad_footnotes(line=line, tag=tag)
        if baddies and len(baddies) > 1:
            bad_lines.append([lineno] + baddies[1:])
    return bad_lines
# def find_all_bad_footnote_urls(book_dir=BOOK_PATH, include_tags=['natural']):
# """ Find lines in the manuscript that contain bad footnotes (only urls) """
# sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags)
# bad_url_lines = {}
# for fileid, (filepath, tagged_lines) in enumerate(sections):
# section_baddies = find_bad_footnote_urls(tagged_lines, include_tags=include_tags)
# if section_baddies:
# bad_url_lines[filepath] = section_baddies
# return bad_url_lines
def infer_url_title(url):
    """ Guess what the page title is going to be from the path and FQDN in the URL

    Returns:
        str: a space-delimited slug guessed from the URL, or None (after logging an
             error) when the URL metadata cannot be retrieved.

    >>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html')
    'the what if tool code free probing of'
    """
    meta = get_url_filemeta(url)
    if not meta:
        # use the module-level logger for consistency with the rest of this module
        # (the original called the root `logging` module directly)
        log.error('Unable to retrieve URL: {}'.format(url))
        return None
    if meta.get('hostname', url) == 'drive.google.com':
        # Google Drive paths are opaque IDs, so fetch the page's real <title> instead
        title = get_url_title(url)
    else:
        title = meta.get('filename', meta['hostname']) or meta['hostname']
        title = splitext(title)[0]  # drop the file extension from the slug
    return delimit_slug(title, ' ')
def get_line_bad_footnotes(line, tag=None, include_tags=None):
    """ Return [original_line, url_footnote1, ... url_footnoteN] for the N bare-url footnotes in `line` """
    wanted = (tag is None or include_tags is None or tag in include_tags or
              any(tag.startswith(prefix) for prefix in include_tags))
    if not wanted:
        # line filtered out by its tag: report it with no footnote URLs
        return [line]
    matches = re_bad_footnotes.findall(line)
    return [line] + [match[0] for match in matches]
def translate_line_footnotes(line, tag=None, default_title='<NOT_FOUND>'):
    r""" Find all bare-url footnotes, like "footnote:[moz.org]" and add a title like "footnote:[Moz (moz.org)]"

    Args:
        line (str): one raw asciidoc line, possibly containing footnote:[<url>] markers
        tag (str): the line's syntax tag, forwarded to get_line_bad_footnotes()
        default_title (str): placeholder used when a retrieved title is empty

    NOTE(review): fetches each URL over the network (get_url_title), so this can be slow
    and its output depends on the remote pages.

    >>> translate_line_footnotes('*Morphemes*:: Parts of tokens or words that contain meaning in and of themselves.'\
    ... 'footnote:[https://spacy.io/usage/linguistic-features#rule-based-morphology]')
    '*Morphemes*:: Parts of tokens or words that contain meaning in and of
    themselves.footnote:[See the web page titled "Linguistic Features : spaCy Usage Documentation"
    (https://spacy.io/usage/linguistic-features#rule-based-morphology).]'
    """
    # first element is the original line; the remainder are the bare footnote URLs
    line_urls = get_line_bad_footnotes(line, tag=tag)
    urls = line_urls[1:] if line_urls else []
    for url in urls:
        footnote = 'footnote:[{url}]'.format(url=url)
        new_footnote = footnote
        # TODO: use these to extract name from hyperlinks
        # fetch the page title (network call); fall back to guessing it from the URL path
        title = get_url_title(url)
        title = title or infer_url_title(url)
        title = (title or '').strip(' \t\n\r\f-_:|="\'/\\')
        # single-word "titles" are most likely junk, so discard them
        title = title if ' ' in (title or 'X') else None
        if title:
            # keep only the first segment before any separator ('\n', '|', 'Â', '·')
            brief_title = title.split('\n')[0].strip().split('|')[0].strip().split('Â')[0].strip().split('·')[0].strip()
            logging.info('URL: {}'.format(url))
            logging.info('TITLE: {}'.format(title))
            # prefer the shortened title only when the full title is long (> 55 chars)
            title = brief_title if len(brief_title) > 3 and len(title) > 55 else title
            title = title.replace('Â', '').replace('·', ':').replace('|', ':').replace('\n', '--')
            logging.info('FINAL: {}'.format(title))
            title = title or default_title
        if title:
            new_footnote = 'footnote:[See the web page titled "{title}" ({url}).]'.format(title=(title or default_title), url=url)
        elif title is None:
            # no usable title at all: leave the footnote unchanged
            logging.error('Unable to find a title for url: {}'.format(url))
        else:
            new_footnote = 'footnote:[See the web page ({url}).]'.format(url=url)
        line = line.replace(
            footnote,
            new_footnote)
    return line
def translate_book(translators=(HyperlinkStyleCorrector().translate, translate_line_footnotes),
                   book_dir=BOOK_PATH, dest=None, include_tags=None,
                   ext='.nlpiabak', skip_untitled=True):
    """ Fix any style corrections listed in `translate` list of translation functions

    Args:
        translators: a single callable or a sequence of callables, each mapping a line (str) to a new line (str)
        book_dir (str): directory of asciidoc files to process
        dest (str): output directory name or path; if falsy, files are edited in place (a backup copy is kept)
        include_tags: only lines whose tag matches (exactly or by prefix) are translated
        ext (str): backup-file extension used for in-place edits
        skip_untitled (bool): accepted for API compatibility; not used in this function

    Returns:
        list of (fileid, lineno, filepath, destpath, old_line, new_line) tuples, one per changed line

    >>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks'))
    3
    >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks'))
    """
    # allow a bare callable (or any non-sequence) to be passed as a single translator
    if callable(translators) or not hasattr(translators, '__len__'):
        translators = (translators,)
    sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags)
    file_line_maps = []
    for fileid, (filepath, tagged_lines) in enumerate(sections):
        log.info('filepath={}'.format(filepath))
        destpath = filepath
        if not dest:
            # in-place edit: keep a backup of the original file next to it
            copyfile(filepath, filepath + '.' + ext.lstrip('.'))
        elif os.path.sep in dest:
            # `dest` is a path to the output directory
            destpath = os.path.join(dest, os.path.basename(filepath))
        else:
            # `dest` is a bare directory name: nest it under the source file's directory
            destpath = os.path.join(os.path.dirname(filepath), dest, os.path.basename(filepath))
        ensure_dir_exists(os.path.dirname(destpath))
        with open(destpath, 'w') as fout:
            log.info('destpath={}'.format(destpath))
            for lineno, (tag, line) in enumerate(tagged_lines):
                # only translate lines whose tag passes the include_tags filter;
                # all lines (translated or not) are written to the output file
                if (include_tags is None or tag in include_tags or
                        any((tag.startswith(t) for t in include_tags))):
                    for translate in translators:
                        new_line = translate(line)  # TODO: be smarter about writing to files in-place
                        if line != new_line:
                            file_line_maps.append((fileid, lineno, filepath, destpath, line, new_line))
                        line = new_line
                fout.write(line)
    return file_line_maps
def correct_hyperlinks(book_dir=BOOK_PATH, dest=None, include_tags=None,
                       ext='.nlpiabak', skip_untitled=True):
    """ DEPRECATED (see translate_line_footnotes)

    Find bad footnotes (only urls), visit the page, add the title to the footnote

    >>> len(correct_hyperlinks(book_dir=BOOK_PATH, dest='cleaned_hyperlinks'))
    2
    >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks'))
    """
    # thin wrapper: run only the hyperlink-style translator over the book
    hyperlink_translator = HyperlinkStyleCorrector().translate
    return translate_book(
        translators=hyperlink_translator,
        book_dir=book_dir,
        dest=dest,
        include_tags=include_tags,
        ext=ext,
        skip_untitled=skip_untitled)
def correct_bad_footnote_urls(book_dir=BOOK_PATH, dest=None, include_tags=None,
                              ext='.nlpiabak', skip_untitled=True):
    """ DEPRECATED (see translate_line_footnotes)

    Find bad footnotes (only urls), visit the page, add the title to the footnote

    >>> len(correct_bad_footnote_urls(book_dir=BOOK_PATH, dest='cleaned_footnotes'))
    1
    >>> rm_r(os.path.join(BOOK_PATH, 'cleaned_footnotes'))
    """
    # thin wrapper: run only the footnote-title translator over the book
    return translate_book(
        translators=translate_line_footnotes,
        book_dir=book_dir,
        dest=dest,
        include_tags=include_tags,
        ext=ext,
        skip_untitled=skip_untitled)
def filter_lines(input_file, output_file, translate=lambda line, tag=None: line):
    """ Translate all the lines of a single file

    Args:
        input_file (str): path to the asciidoc file to read
        output_file (str): unused; kept for backward compatibility of the signature
        translate (callable): called as translate(line=line, tag=tag) for each tagged line

    Returns:
        (filepath, [(tag, translated_line), ...])

    NOTE: the original implementation had several defects: it passed a *list* to
    get_lines() (which expects a path or glob string), subscripted the lazy zip
    that get_lines() returns, used a default `translate` that rejected the `tag`
    keyword it is always called with, and unpacked raw line strings as (tag, line).
    """
    # get_lines() yields (filepath, raw_lines) pairs lazily; take the first pair
    filepath, raw_lines = next(iter(get_lines(input_file)))
    # tag each raw line so the (tag, line) unpacking below is well-formed
    return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in tag_lines(raw_lines)]
def filter_tagged_lines(tagged_lines, include_tags=None, exclude_tags=None):
    r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes

    >>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')])
    <generator object filter_tagged_lines at ...>
    >>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')],
    ...                          include_tags='natural'))
    [('natural', 'Hello.')]
    """
    # a bare string means a single tag prefix for either filter
    if isinstance(include_tags, str):
        include_tags = (include_tags,)
    if isinstance(exclude_tags, str):
        exclude_tags = (exclude_tags,)
    for tagged_line in tagged_lines:
        tag = tagged_line[0]
        included = (include_tags is None or tag in include_tags or
                    any(tag.startswith(prefix) for prefix in include_tags))
        if not included:
            log.debug('skipping tag {} because not in {}'.format(tag, include_tags))
            continue
        excluded = exclude_tags is not None and any(
            tag.startswith(prefix) for prefix in exclude_tags)
        if excluded:
            log.debug('skipping tag {} because it starts with one of the exclude_tags={}'.format(
                tag, exclude_tags))
            continue
        yield tagged_line
def main(book_dir=BOOK_PATH, include_tags=None, verbosity=1):
    r""" Parse all the asciidoc files in book_dir, returning a list of 2-tuples of lists of 2-tuples (tagged lines)

    Args:
        book_dir (str): directory containing the manuscript asciidoc files
        include_tags: a tag string or iterable of tag prefixes to print; None prints everything
        verbosity (int): 0 = silent, 1 = print matching lines, >1 = print (tag, line) tuples with file headers

    >>> main(BOOK_PATH, verbosity=0)
    [('.../src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)]
    >>> main(BOOK_PATH, include_tags='natural', verbosity=1)
    = Glossary
    We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[Bill Wilson...
    at the university of New South Wales in Australia has a more complete one here:...
    https://www.cse.unsw.edu.au/~billw/nlpdict.html]...
    You can find some of the tools we used to generate this list in the `nlpia` python package at...
    ...
    >>> tagged_lines = list(main(BOOK_PATH, include_tags=['natural', 'blank'], verbosity=0))
    >>> len(tagged_lines[0])
    2
    >>> tagged_lines = list(main(BOOK_PATH, include_tags=['natural', 'blank'], verbosity=1))
    = Glossary
    <BLANKLINE>
    We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[...
    >>> tagged_lines = list(main(BOOK_PATH, include_tags='natural', verbosity=1))
    = Glossary
    We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[...

    TODO:
        `def filter_tagged_lines(tagged_lines)` that returns an iterable.
    """
    if verbosity:
        log.info('book_dir: {}'.format(book_dir))
        log.info('include_tags: {}'.format(include_tags))
        log.info('verbosity: {}'.format(verbosity))
    # normalize include_tags to a lowercase set of tag prefixes (or None for "no filter")
    include_tags = [include_tags] if isinstance(include_tags, str) else include_tags
    include_tags = None if not include_tags else set([t.lower().strip() for t in include_tags])
    sections = get_tagged_sections(book_dir=book_dir)
    if verbosity >= 1:
        for filepath, tagged_lines in sections:
            tagged_lines = filter_tagged_lines(tagged_lines, include_tags=include_tags)
            if verbosity > 1:
                # file header banner (only in very-verbose mode)
                print('=' * 75)
                print(filepath)
                print('-' * 75)
            if verbosity == 1:
                # print just the raw line text
                for tag, line in tagged_lines:
                    print(line)
            else:
                # print the full (tag, line) tuples
                for tagged_line in tagged_lines:
                    print(tagged_line)
            if verbosity > 1:
                print('=' * 79)
                print()
    else:
        log.debug('vebosity={} so nothing output to stdout with print()'.format(verbosity))
    return sections
if __name__ == '__main__':
    # CLI usage: book_parser.py [book_dir [verbosity [tag ...]]]
    args = sys.argv[1:]
    book_dir = os.path.curdir
    verbosity = 1
    include_tags = tuple(INCLUDE_TAGS)
    if args:
        book_dir = args[0]
        args = args[1:]
    if args:
        try:
            verbosity = int(args[0])
            include_tags = args[1:] or include_tags
        except ValueError:
            # first remaining arg wasn't an integer, so treat all of them as tags
            verbosity = 1
            include_tags = args
    # "all"/"none"/"true" (case-insensitive, matched by 3-char prefix) disables tag filtering.
    # BUGFIX: compare the 3-char prefix against 3-char prefixes; the original compared it
    # against the full words, so "none" ('non') and "true" ('tru') could never match.
    if include_tags and include_tags[0].strip().lower()[:3] in ('all', 'non', 'tru'):
        include_tags = None
    # print('Parsing Chapters and Appendices in: ' + book_dir)
    # print('***PRINTING LINES WITH TAGS***: ' + str(include_tags))
    main(book_dir=book_dir, include_tags=include_tags, verbosity=verbosity)
| 45.843327 | 130 | 0.628792 | """ Functions for reading ascidoc files (*.adoc, *.asc, *.asciidoc) and tagging each line """
import os
import sys
import glob
import re
import logging
from shutil import copyfile
from pugnlp import futil
from nlpia.regexes import CRE_ACRONYM
# from nlpia.data_utils import iter_lines # FIXME: reuse
from nlpia.constants import BOOK_PATH
from nlpia.regexes import RE_URL_SIMPLE, splitext
from nlpia.loaders import get_url_title, get_url_filemeta
from nlpia.transcoders import delimit_slug
from nlpia.translators import HyperlinkStyleCorrector
from nlpia.futil import rm_rf, rm_r # noqa (used in doctests to clean up)
log = logging.getLogger(__name__)
# FIXME: redundant definitions here from develop branch
BLOCK_DELIMITERS = dict([('--', 'natural'), ('==', 'natural'), ('__', 'natural'), ('**', 'natural'),
('++', 'latex'), ('////', 'comment')])
BLOCK_DELIM_CHRS = dict([(k[0], v) for k, v in BLOCK_DELIMITERS.items()])
BLOCK_DELIM_REGEXES = dict([(r'^[' + s[0] + r']{' + str(len(s)) + r',160}$', tag) for (s, tag) in BLOCK_DELIMITERS.items()])
BLOCK_HEADERS = dict([('[tip]', 'natural'), ('[note]', 'natural'), ('[important]', 'natural'), ('[quote]', 'natural')])
CRE_BLOCK_DELIMITER = re.compile('|'.join([s for s, tag in BLOCK_DELIM_REGEXES.items()]))
HEADER_TYPES = [('source', 'code'), ('latex', 'latex')]
# Working definitions from master branch
BLOCK_DELIMITERS = dict([('--', 'code'), ('==', 'natural_sidenote'), ('__', 'natural_quote'), ('**', 'natural_asside'),
('++', 'latexmath'), ('//', 'comment')])
BLOCK_DELIMITER_CHRS = ''.join([k[0] for k in BLOCK_DELIMITERS.keys()])
BLOCK_HEADERS = dict([('[tip]', 'natural_tip'), ('[note]', 'natural_note'),
('[important]', 'natural_important'), ('[quote]', 'natural_quote')])
BLOCK_HEADERS4 = dict([(k[:4], v) for k, v in BLOCK_HEADERS.items()])
CRE_BLOCK_DELIMITER = re.compile(r'^[' + BLOCK_DELIMITER_CHRS + r']{2,50}$')
CRE_ANNOTATION = re.compile(r'^<([0-9]{1,2})>.*')
HEADER_TYPES = [('source', 'code'), ('latex', 'latex'), ('latexmath', 'latex'),
('template="glossary"', 'natural_glossary'), ("template='glossary'", 'natural_glossary')]
VALID_TAGS = set(['anchor', 'attribute', 'blank_line', 'block_header', 'caption', 'code', 'code_end', 'code_start', ] +
[b for b in BLOCK_DELIMITERS.values()] +
[b + '_start' for b in BLOCK_DELIMITERS.values()] +
[b + '_end' for b in BLOCK_DELIMITERS.values()] +
['natural_heading{}'.format(i) for i in range(1, 6)] +
['image_link', 'natural', 'natural_end', 'natural_start', 'code_header'])
INCLUDE_TAGS = set(['natural', 'caption'] + ['natural_heading{}'.format(i) for i in range(1, 6)])
re_bad_footnotes = re.compile(r'footnote:\[' + RE_URL_SIMPLE + r'\]')
def get_lines(file_path=BOOK_PATH):
    r""" Retrieve text lines from the manuscript Chapter*.asc and Appendix*.asc files

    Args:
        file_path (str): Path to directory containing manuscript asciidoc files
        i.e.: /Users/cole-home/repos/nlpinaction/manuscript/ or nlpia.constants.BOOK_PATH

    Returns:
        list of lists of str, one list for each Chapter or Appendix

    >>> lines = get_lines(BOOK_PATH)
    >>> next(lines)
    ('.../src/nlpia/data/book/Appendix F -- Glossary.adoc',
    ['= Glossary\n',
    '\n',
    "We've collected some ...])
    """
    if os.path.isdir(file_path):
        # a directory: read every *.adoc file in it
        # NOTE(review): *.asc / *.asciidoc files mentioned above are NOT matched here -- confirm intended
        file_path = os.path.join(file_path, '*.adoc')
        files = glob.glob(file_path)
    elif os.path.isfile(file_path):
        # a single file path
        files = [file_path]
    elif '*' in file_path:
        # a glob pattern; anchor relative patterns at the current working directory
        if os.path.sep not in file_path:
            file_path = os.path.join(os.path.abspath(os.path.curdir), file_path)
        files = glob.glob(file_path)
    else:
        raise FileNotFoundError("Unable to find the directory or files requested.")
    lines = []
    for filepath in files:
        with open(filepath, 'r') as f:
            lines.append(f.readlines())
    # lazy iterator of (filepath, list_of_raw_lines) pairs
    return zip(files, lines)
def get_acronyms(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript')):
    """ Find all the 2- to 5-letter acronyms in the manuscript and return as a sorted list of tuples

    Returns:
        list of (acronym, spelled_out) tuples sorted by acronym, deduplicated (last occurrence wins)
    """
    acronyms = []
    for f, lines in get_lines(manuscript):
        for line in lines:
            # NOTE: finditer() always returns an iterator (always truthy), so the
            # original `if matches:` check was dead code and is omitted here.
            for m in CRE_ACRONYM.finditer(line):
                # CRE_ACRONYM captures one of the group pairs (a2, s2) ... (a5, s5);
                # keep the first pair that matched, mirroring the original elif chain.
                for num_letters in range(2, 6):
                    acro = m.group('a{}'.format(num_letters))
                    if acro:
                        acronyms.append((acro, m.group('s{}'.format(num_letters))))
                        break
    return sorted(dict(acronyms).items())
def write_glossary(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript'), linesep=None):
    """ Compose an asciidoc string with acronyms culled from the manuscript """
    linesep = linesep or os.linesep
    header = ['[acronyms]', '== Acronyms', '', '[acronyms,template="glossary",id="terms"]']
    # one glossary entry per acronym, with the expansion capitalized
    entries = [
        '*{}*:: {} -- '.format(acro, spelled[0].upper() + spelled[1:])
        for acro, spelled in get_acronyms(manuscript)
    ]
    return linesep.join(header + entries)
def tag_lines(lines, include_tags=None):
    r""" Naively tags lines from manuscript with: code, natural, heading, etc.

    Implemented as a small state machine: `current_block_type` and `block_terminator`
    carry the open-block state from one line to the next.

    Returns:
        list of tuples [(tag, line), ...]

    >>> ' '.join(sorted(VALID_TAGS))
    'anchor attribute blank_line block_header caption code code_end code_header code_start
    comment comment_end comment_start image_link latexmath latexmath_end latexmath_start
    natural natural_asside natural_asside_end natural_asside_start natural_end
    natural_heading1 natural_heading2 natural_heading3 natural_heading4 natural_heading5
    natural_quote natural_quote_end natural_quote_start
    natural_sidenote natural_sidenote_end natural_sidenote_start natural_start'
    >>> list(tag_lines('|= Title| :chapter: 0|Hello|cruel world|==Heading Level 2| \t| [source,bash]|====|$ grep this|====|'\
    ... .split('|')))
    [('blank_line', ''), ('natural_heading1', '= Title'), ('attribute', ' :chapter: 0'),
    ('natural', 'Hello'), ('natural', 'cruel world'), ('natural_heading2', '==Heading Level 2'),
    ('blank_line', ' \t'), ('code_header', ' [source,bash]'),
    ('code_start', '===='), ('code', '$ grep this'), ('code_end', '===='), ('blank_line', '')]
    """
    current_block_type = None   # block type we are currently inside (None = not in a block)
    block_terminator = None     # normalized delimiter line that will close the open block
    tag = ''
    tagged_lines = []
    for idx, line in enumerate(lines):
        # normalize for matching: lowercase, strip, and remove internal spaces
        normalized_line = line.lower().strip().replace(" ", "")
        # [source,...] with or without any following "----" block delimiter
        # TODO: make this a regex that classifies among the different types (source, glossary, tip, etc)
        header_type = next((HEADER_TYPES[i] for i in range(len(HEADER_TYPES)) if
                            normalized_line.startswith('[') and normalized_line[1:].startswith(HEADER_TYPES[i][0])),
                           None)
        if header_type:
            # e.g. "[source,python]" opens a code block even without a "----" delimiter
            current_block_type = header_type[1]
            tag = current_block_type + '_header'
            block_terminator = None
        elif normalized_line[:4] in BLOCK_HEADERS4:
            # e.g. "[tip]", "[note]" headers, matched by their first 4 normalized chars
            current_block_type = BLOCK_HEADERS4[normalized_line[:4]]
            tag = current_block_type + '_header'  # BLOCK_HEADERS[normalized_line]
            block_terminator = None
        elif (
                CRE_BLOCK_DELIMITER.match(normalized_line) and
                normalized_line[:2] in BLOCK_DELIMITERS):  # or (tag in set('caption anchor'.split()))):
            # a "----"/"====" style delimiter line either opens or closes a block
            if (not idx or not current_block_type or not block_terminator):
                current_block_type = (current_block_type or BLOCK_DELIMITERS[normalized_line[:2]])
                tag = current_block_type + '_start'
                block_terminator = normalized_line
            elif block_terminator and line.rstrip() == block_terminator:
                tag = current_block_type + '_end'
                current_block_type = None
                block_terminator = None
            else:
                tag = current_block_type
        elif current_block_type and (line.rstrip() == block_terminator or
                                     (not block_terminator and not normalized_line)):
            # a header-only block (no delimiter) is closed by the first blank line
            tag = current_block_type + '_end'
            current_block_type = None
            block_terminator = None
        elif current_block_type:
            # any other line inside an open block inherits the block's tag
            tag = current_block_type
        elif not normalized_line:
            tag = 'blank_line'
        elif normalized_line.startswith(r'//'):
            tag = 'comment'
        elif normalized_line.startswith(r':'):
            tag = 'attribute'
        elif normalized_line.startswith('='):
            # heading level = number of '=' chars in the first whitespace-delimited token
            tag = 'natural_heading'
            tag += str(len([c for c in normalized_line[:6].split()[0] if c == '=']))
        elif normalized_line.startswith('.'):
            tag = 'caption'
        elif normalized_line.startswith('image:'):
            tag = 'image_link'
        elif normalized_line.startswith('[['):
            tag = 'anchor'
        else:
            tag = 'natural'
            current_block_type = None
        tagged_lines.append((tag, line))

    return filter_tagged_lines(tagged_lines, include_tags=include_tags)
def get_tagged_sections(book_dir=BOOK_PATH, include_tags=None):
""" Get list of (adoc_file_path, (adoc_syntax_tag, raw_line_str))
>>> get_tagged_sections()
[('...src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)]
"""
return [(filepath, tag_lines(lines, include_tags=include_tags)) for filepath, lines in get_lines(book_dir)]
def find_bad_footnote_urls(tagged_lines, include_tags=None):
""" Find lines in the list of 2-tuples of adoc-tagged lines that contain bad footnotes (only urls)
>>> sections = get_tagged_sections(BOOK_PATH)
>>> tagged_lines = list(sections[0][1])
>>> find_bad_footnote_urls(tagged_lines)
[[30, 'https://spacy.io/usage/linguistic-features#rule-based-morphology']]
"""
section_baddies = []
log.debug(tagged_lines[:2])
for lineno, (tag, line) in enumerate(tagged_lines):
line_baddies = None
if tag is None or include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags)):
line_baddies = get_line_bad_footnotes(line=line, tag=tag)
if line_baddies and len(line_baddies) > 1:
section_baddies.append([lineno] + line_baddies[1:])
else:
pass
# section_baddies.append(line)
return section_baddies
# def find_all_bad_footnote_urls(book_dir=BOOK_PATH, include_tags=['natural']):
# """ Find lines in the manuscript that contain bad footnotes (only urls) """
# sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags)
# bad_url_lines = {}
# for fileid, (filepath, tagged_lines) in enumerate(sections):
# section_baddies = find_bad_footnote_urls(tagged_lines, include_tags=include_tags)
# if section_baddies:
# bad_url_lines[filepath] = section_baddies
# return bad_url_lines
def infer_url_title(url):
""" Guess what the page title is going to be from the path and FQDN in the URL
>>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html')
'the what if tool code free probing of'
"""
meta = get_url_filemeta(url)
title = ''
if meta:
if meta.get('hostname', url) == 'drive.google.com':
title = get_url_title(url)
else:
title = meta.get('filename', meta['hostname']) or meta['hostname']
title, fileext = splitext(title)
else:
logging.error('Unable to retrieve URL: {}'.format(url))
return None
return delimit_slug(title, ' ')
def get_line_bad_footnotes(line, tag=None, include_tags=None):
""" Return [original_line, url_footnote1, url_footnote2, ... url_footnoteN] for N bad footnotes in the line """
if tag is None or include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags)):
found_baddies = re_bad_footnotes.findall(line)
return [line] + [baddie[0] for baddie in found_baddies]
return [line]
def translate_line_footnotes(line, tag=None, default_title='<NOT_FOUND>'):
r""" Find all bare-url footnotes, like "footnote:[moz.org]" and add a title like "footnote:[Moz (moz.org)]"
>>> translate_line_footnotes('*Morphemes*:: Parts of tokens or words that contain meaning in and of themselves.'\
... 'footnote:[https://spacy.io/usage/linguistic-features#rule-based-morphology]')
'*Morphemes*:: Parts of tokens or words that contain meaning in and of
themselves.footnote:[See the web page titled "Linguistic Features : spaCy Usage Documentation"
(https://spacy.io/usage/linguistic-features#rule-based-morphology).]'
"""
line_urls = get_line_bad_footnotes(line, tag=tag)
urls = line_urls[1:] if line_urls else []
for url in urls:
footnote = 'footnote:[{url}]'.format(url=url)
new_footnote = footnote
# TODO: use these to extract name from hyperlinks
title = get_url_title(url)
title = title or infer_url_title(url)
title = (title or '').strip(' \t\n\r\f-_:|="\'/\\')
title = title if ' ' in (title or 'X') else None
if title:
brief_title = title.split('\n')[0].strip().split('|')[0].strip().split('Â')[0].strip().split('·')[0].strip()
logging.info('URL: {}'.format(url))
logging.info('TITLE: {}'.format(title))
title = brief_title if len(brief_title) > 3 and len(title) > 55 else title
title = title.replace('Â', '').replace('·', ':').replace('|', ':').replace('\n', '--')
logging.info('FINAL: {}'.format(title))
title = title or default_title
if title:
new_footnote = 'footnote:[See the web page titled "{title}" ({url}).]'.format(title=(title or default_title), url=url)
elif title is None:
logging.error('Unable to find a title for url: {}'.format(url))
else:
new_footnote = 'footnote:[See the web page ({url}).]'.format(url=url)
line = line.replace(
footnote,
new_footnote)
return line
def ensure_dir_exists(dest):
    """ Make sure the directory `dest` exists, creating it (with parents) if necessary

    Args:
        dest (str or None): destination directory path; None is a no-op

    Returns:
        str or None: the normalized destination path (None if `dest` was None)

    Raises:
        FileNotFoundError: if `dest` looks like a file path or its parent can't be found
    """
    if dest is None:
        # BUGFIX: the original guarded only the normalization block with
        # `if dest is not None:` and then crashed on os.path.isdir(None) below
        return dest
    dest = dest.rstrip(os.path.sep)
    if os.path.isdir(dest):
        pass
    elif os.path.isdir(os.path.dirname(dest)):
        # parent exists: re-join so the returned path is normalized
        parent = os.path.dirname(dest)
        dest = os.path.join(parent, dest[len(parent):].lstrip(os.path.sep))
    elif os.path.sep in dest:
        raise FileNotFoundError('Unable to find destination directory for the path: {}'.format(dest))
    elif splitext(dest)[1]:
        raise FileNotFoundError(
            'Unable to find destination directory for the path. It looks like a file path rather than a directory: {}'.format(
                dest))
    if not os.path.isdir(dest):
        log.warning('Creating directory with mkdir_p({})'.format(repr(dest)))
        futil.mkdir_p(dest)
    log.info('Saving translated files in {}{}*'.format(dest, os.path.sep))
    return dest
def translate_book(translators=(HyperlinkStyleCorrector().translate, translate_line_footnotes),
book_dir=BOOK_PATH, dest=None, include_tags=None,
ext='.nlpiabak', skip_untitled=True):
""" Fix any style corrections listed in `translate` list of translation functions
>>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks'))
3
>>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks'))
"""
if callable(translators) or not hasattr(translators, '__len__'):
translators = (translators,)
sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags)
file_line_maps = []
for fileid, (filepath, tagged_lines) in enumerate(sections):
log.info('filepath={}'.format(filepath))
destpath = filepath
if not dest:
copyfile(filepath, filepath + '.' + ext.lstrip('.'))
elif os.path.sep in dest:
destpath = os.path.join(dest, os.path.basename(filepath))
else:
destpath = os.path.join(os.path.dirname(filepath), dest, os.path.basename(filepath))
ensure_dir_exists(os.path.dirname(destpath))
with open(destpath, 'w') as fout:
log.info('destpath={}'.format(destpath))
for lineno, (tag, line) in enumerate(tagged_lines):
if (include_tags is None or tag in include_tags or
any((tag.startswith(t) for t in include_tags))):
for translate in translators:
new_line = translate(line) # TODO: be smarter about writing to files in-place
if line != new_line:
file_line_maps.append((fileid, lineno, filepath, destpath, line, new_line))
line = new_line
fout.write(line)
return file_line_maps
def correct_hyperlinks(book_dir=BOOK_PATH, dest=None, include_tags=None,
ext='.nlpiabak', skip_untitled=True):
""" DEPRECATED (see translate_line_footnotes)
Find bad footnotes (only urls), visit the page, add the title to the footnote
>>> len(correct_hyperlinks(book_dir=BOOK_PATH, dest='cleaned_hyperlinks'))
2
>>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks'))
"""
# bad_url_lines = find_all_bad_footnote_urls(book_dir=book_dir)
# file_line_maps = []
return translate_book(translators=HyperlinkStyleCorrector().translate,
book_dir=book_dir, dest=dest, include_tags=include_tags,
ext=ext, skip_untitled=skip_untitled)
def correct_bad_footnote_urls(book_dir=BOOK_PATH, dest=None, include_tags=None,
ext='.nlpiabak', skip_untitled=True):
""" DEPRECATED (see translate_line_footnotes)
Find bad footnotes (only urls), visit the page, add the title to the footnote
>>> len(correct_bad_footnote_urls(book_dir=BOOK_PATH, dest='cleaned_footnotes'))
1
>>> rm_r(os.path.join(BOOK_PATH, 'cleaned_footnotes'))
"""
# bad_url_lines = find_all_bad_footnote_urls(book_dir=book_dir)
# file_line_maps = []
return translate_book(translators=translate_line_footnotes, book_dir=book_dir, dest=dest, include_tags=include_tags,
ext=ext, skip_untitled=skip_untitled)
def filter_lines(input_file, output_file, translate=lambda line, tag=None: line):
    """ Translate all the lines of a single file

    Args:
        input_file (str): path to the asciidoc file to read
        output_file (str): unused; kept for backward compatibility of the signature
        translate (callable): called as translate(line=line, tag=tag) for each tagged line

    Returns:
        (filepath, [(tag, translated_line), ...])

    NOTE: the original implementation had several defects: it passed a *list* to
    get_lines() (which expects a path or glob string), subscripted the lazy zip
    that get_lines() returns, used a default `translate` that rejected the `tag`
    keyword it is always called with, and unpacked raw line strings as (tag, line).
    """
    # get_lines() yields (filepath, raw_lines) pairs lazily; take the first pair
    filepath, raw_lines = next(iter(get_lines(input_file)))
    # tag each raw line so the (tag, line) unpacking below is well-formed
    return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in tag_lines(raw_lines)]
def filter_tagged_lines(tagged_lines, include_tags=None, exclude_tags=None):
r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes
>>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')])
<generator object filter_tagged_lines at ...>
>>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')],
... include_tags='natural'))
[('natural', 'Hello.')]
"""
include_tags = (include_tags,) if isinstance(include_tags, str) else include_tags
exclude_tags = (exclude_tags,) if isinstance(exclude_tags, str) else exclude_tags
for tagged_line in tagged_lines:
if (include_tags is None or tagged_line[0] in include_tags or
any((tagged_line[0].startswith(t) for t in include_tags))):
if exclude_tags is None or not any((tagged_line[0].startswith(t) for t in exclude_tags)):
yield tagged_line
else:
log.debug('skipping tag {} because it starts with one of the exclude_tags={}'.format(
tagged_line[0], exclude_tags))
else:
log.debug('skipping tag {} because not in {}'.format(tagged_line[0], include_tags))
def main(book_dir=BOOK_PATH, include_tags=None, verbosity=1):
r""" Parse all the asciidoc files in book_dir, returning a list of 2-tuples of lists of 2-tuples (tagged lines)
>>> main(BOOK_PATH, verbosity=0)
[('.../src/nlpia/data/book/Appendix F -- Glossary.asc', <generator object filter_tagged_lines at ...>)]
>>> main(BOOK_PATH, include_tags='natural', verbosity=1)
= Glossary
We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[Bill Wilson...
at the university of New South Wales in Australia has a more complete one here:...
https://www.cse.unsw.edu.au/~billw/nlpdict.html]...
You can find some of the tools we used to generate this list in the `nlpia` python package at...
...
>>> tagged_lines = list(main(BOOK_PATH, include_tags=['natural', 'blank'], verbosity=0))
>>> len(tagged_lines[0])
2
>>> tagged_lines = list(main(BOOK_PATH, include_tags=['natural', 'blank'], verbosity=1))
= Glossary
<BLANKLINE>
We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[...
>>> tagged_lines = list(main(BOOK_PATH, include_tags='natural', verbosity=1))
= Glossary
We've collected some definitions of some common NLP and ML acronyms and terminology here.footnote:[...
TODO:
`def filter_tagged_lines(tagged_lines)` that returns an iterable.
"""
if verbosity:
log.info('book_dir: {}'.format(book_dir))
log.info('include_tags: {}'.format(include_tags))
log.info('verbosity: {}'.format(verbosity))
include_tags = [include_tags] if isinstance(include_tags, str) else include_tags
include_tags = None if not include_tags else set([t.lower().strip() for t in include_tags])
sections = get_tagged_sections(book_dir=book_dir)
if verbosity >= 1:
for filepath, tagged_lines in sections:
tagged_lines = filter_tagged_lines(tagged_lines, include_tags=include_tags)
if verbosity > 1:
print('=' * 75)
print(filepath)
print('-' * 75)
if verbosity == 1:
for tag, line in tagged_lines:
print(line)
else:
for tagged_line in tagged_lines:
print(tagged_line)
if verbosity > 1:
print('=' * 79)
print()
else:
log.debug('vebosity={} so nothing output to stdout with print()'.format(verbosity))
return sections
if __name__ == '__main__':
    # CLI usage: book_parser.py [book_dir [verbosity [tag ...]]]
    args = sys.argv[1:]
    book_dir = os.path.curdir
    verbosity = 1
    include_tags = tuple(INCLUDE_TAGS)
    if args:
        book_dir = args[0]
        args = args[1:]
    if args:
        try:
            verbosity = int(args[0])
            include_tags = args[1:] or include_tags
        except ValueError:
            # first remaining arg wasn't an integer, so treat all of them as tags
            verbosity = 1
            include_tags = args
    # "all"/"none"/"true" (case-insensitive, matched by 3-char prefix) disables tag filtering.
    # BUGFIX: compare the 3-char prefix against 3-char prefixes; the original compared it
    # against the full words, so "none" ('non') and "true" ('tru') could never match.
    if include_tags and include_tags[0].strip().lower()[:3] in ('all', 'non', 'tru'):
        include_tags = None
    # print('Parsing Chapters and Appendices in: ' + book_dir)
    # print('***PRINTING LINES WITH TAGS***: ' + str(include_tags))
    main(book_dir=book_dir, include_tags=include_tags, verbosity=verbosity)
| 903 | 0 | 23 |
9b87c9c4019e9dd2f23a08d165e4d38ca60a67db | 966 | py | Python | leetcode/139_Word_Break.py | thiakx/leetcode | cda5b3844331fb244c336bce7a551eafe946531d | [
"MIT"
] | null | null | null | leetcode/139_Word_Break.py | thiakx/leetcode | cda5b3844331fb244c336bce7a551eafe946531d | [
"MIT"
] | null | null | null | leetcode/139_Word_Break.py | thiakx/leetcode | cda5b3844331fb244c336bce7a551eafe946531d | [
"MIT"
] | null | null | null | import unittest
# Given a non-empty string s and a dictionary wordDict containing a list of non-empty words,
# determine if s can be segmented into a space-separated sequence of one or more dictionary words.
s = "leetcode"
wordDict = ["leet", "code"]
output_value = True
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False) # extra conditions for jupyter notebook
| 27.6 | 101 | 0.587992 | import unittest
# Given a non-empty string s and a dictionary wordDict containing a list of non-empty words,
# determine if s can be segmented into a space-separated sequence of one or more dictionary words.
s = "leetcode"
wordDict = ["leet", "code"]
output_value = True
class funcTest(unittest.TestCase):
    """Check Solution.wordBreak against the module-level sample input."""

    def test(self):
        actual = Solution().wordBreak(s, wordDict)
        self.assertEqual(actual, output_value)
class Solution:
    def wordBreak(self, s, wordDict):
        """Return True if *s* can be split into a sequence of words from *wordDict*.

        Dynamic programming: dp[i] is True when the prefix s[:i + 1] can be
        segmented.  A word w ending at index i extends a valid segmentation
        when the preceding prefix is valid (dp[i - len(w)]) or w starts at
        index 0 (i - len(w) == -1).

        :type s: str
        :type wordDict: List[str]
        :rtype: bool
        """
        n = len(s)
        dp = n * [False]
        for i in range(n):
            for w in wordDict:
                # The slice comparison fails whenever len(w) > i + 1 (the
                # slice is then shorter than w), so the dp lookup below is
                # only reached with a valid non-negative offset.
                if w == s[i - len(w) + 1:i + 1] and (dp[i - len(w)] or i - len(w) == -1):
                    dp[i] = True
                    break  # one matching word per position is enough
        # Guard the empty string: dp is empty and dp[-1] would raise
        # IndexError; an empty string is trivially segmentable.
        return dp[-1] if dp else True
if __name__ == '__main__':
    # Run the unittest suite; argv/exit tweaks keep it working when executed
    # from environments that inject their own argv (e.g. notebooks).
    unittest.main(argv=['first-arg-is-ignored'], exit=False) # extra conditions for jupyter notebook
| 96 | 392 | 72 |
4debe122d78ed3434c05a0ee05862422506ec84e | 1,552 | py | Python | common/caching/cache.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 64 | 2017-09-30T16:23:43.000Z | 2022-03-30T23:26:50.000Z | common/caching/cache.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 339 | 2017-10-26T06:59:14.000Z | 2022-03-10T22:34:29.000Z | common/caching/cache.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 58 | 2017-09-16T17:25:10.000Z | 2022-03-04T18:14:02.000Z | from django.core.cache import cache
from enum import Enum
Cache = CacheWrapper(cache)
| 33.73913 | 105 | 0.659794 | from django.core.cache import cache
from enum import Enum
class CacheWrapper:
    """Thin wrapper around a cache backend that remembers, per key, the
    generator function used to produce the value so refresh() can recompute
    it later without the caller re-supplying the function.
    """
    # Class-level registry shared by all instances: key -> zero-arg callable
    # that regenerates the value for that key.
    _cache_generators = {}
    def __init__(self, cache_backend):
        # cache_backend: object exposing get/set/delete
        # (e.g. django.core.cache.cache, as imported at module top).
        self._cache = cache_backend
    def get(self, key, generator_func=None):
        """
        Retrieve cached value, and cache the value if it is not already cached
        :param key: Key of value to retrieve
        :param generator_func: Function that generates value
        :return: Value mapped to key
        """
        # NOTE(review): `or` treats falsy cached values (0, '', [], False) as
        # cache misses and regenerates them — confirm that is acceptable.
        return self._cache.get(key) or (generator_func and self._set_with_generator(key, generator_func))
    def refresh(self, key, value=None, timeout=None):
        """
        Refresh the cached value for a given key, such as when the underlying data has changed
        :param key: Key of value to re-cache
        :param value: Value to cache
        :param timeout Time in seconds to when the cache entry expires
        """
        # Fall back to the registered generator when no explicit value is
        # given; caches None when neither a value nor a generator exists.
        _value = value or (self._cache_generators[key]() if key in self._cache_generators else None)
        self._cache.set(key, _value, timeout=timeout)
    def clear(self, key):
        """
        Delete cache entry
        :param key: Key of cache entry to delete
        """
        self._cache.delete(key)
    def _set_with_generator(self, key, generator_func):
        # Register the generator for later refresh() calls, compute the value
        # once, and cache it with no expiry (timeout=None).
        if generator_func is not None:
            self._cache_generators[key] = generator_func
            generated_value = self._cache_generators[key]()
            self._cache.set(key, generated_value, timeout=None)
            return generated_value
Cache = CacheWrapper(cache)
| 322 | 1,118 | 23 |
37f9bb3f88182f67d0e5881bab0f1d09a4040aa1 | 601 | py | Python | recognize_main_summary_table_1.py | code4nagoya/covid19-aichi-tools | 27d16f77b23bce212df81312151f8e0a9b66015b | [
"MIT"
] | 6 | 2020-04-07T01:53:54.000Z | 2021-09-15T14:25:16.000Z | recognize_main_summary_table_1.py | code4nagoya/covid19-aichi-tools | 27d16f77b23bce212df81312151f8e0a9b66015b | [
"MIT"
] | 49 | 2020-03-30T14:26:05.000Z | 2022-03-10T08:37:00.000Z | recognize_main_summary_table_1.py | code4nagoya/covid19-aichi-tools | 27d16f77b23bce212df81312151f8e0a9b66015b | [
"MIT"
] | 5 | 2020-04-07T06:25:53.000Z | 2021-01-02T10:09:30.000Z | '''
「検査陽性者の状況」画像から数値データを抽出する処理 パターン1
'''
import re
import pytesseract
import cv2
| 26.130435 | 111 | 0.63228 | '''
「検査陽性者の状況」画像から数値データを抽出する処理 パターン1
'''
import re
import pytesseract
import cv2
def recognize(jpg_path):
    """OCR the "positive test status" image and return the numbers found.

    :param jpg_path: path to the source JPEG image
    :return: list of ints, one per "<digits>人" (people count) occurrence
             in the OCR'd text
    """
    src = cv2.imread(str(jpg_path))
    # Keep only pixels inside the BGR range (bright/near-white background)
    # so tesseract sees high-contrast text.
    img = cv2.inRange(src, (150, 120, 130), (255, 255, 255))
    # Crop to the region of interest (top 550 pixel rows).
    img_crop = img[0:550]
    # ref http://blog.machine-powers.net/2018/08/02/learning-tesseract-command-utility/
    txt = pytesseract.image_to_string(img_crop, lang="jpn", config="--psm 3").replace(".", "").replace(",", "")
    print(txt)
    # Extract every "<digits>人" occurrence and convert to int.
    data = list(map(lambda str:int(str.replace('人', '')), re.findall("[0-9]+人", txt.replace(',', ''))))
    return data
| 529 | 0 | 23 |
b387e7199668212c73f63ac03d9cacc7570911c4 | 1,701 | py | Python | scouter.host/scouter/host/meminfo.py | lunastar12/scouter | 715f7edda67e5450d43122fef2707d3585d049e6 | [
"Apache-2.0"
] | 1 | 2021-09-09T09:56:11.000Z | 2021-09-09T09:56:11.000Z | scouter.host/scouter/host/meminfo.py | lunastar12/scouter | 715f7edda67e5450d43122fef2707d3585d049e6 | [
"Apache-2.0"
] | null | null | null | scouter.host/scouter/host/meminfo.py | lunastar12/scouter | 715f7edda67e5450d43122fef2707d3585d049e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# original code from
# https://github.com/giampaolo/psutil/blob/master/examples/
#
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print system memory information.
"""
import psutil
from cStringIO import StringIO
from scouter.lang.pack import *
from scouter.lang.value import *
if __name__ == '__main__':
# main()
pack = process(None)
print pack
| 24.652174 | 72 | 0.603175 | #!/usr/bin/env python
#
# original code from
# https://github.com/giampaolo/psutil/blob/master/examples/
#
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print system memory information.
"""
import psutil
from cStringIO import StringIO
from scouter.lang.pack import *
from scouter.lang.value import *
def bytes2human(n):
    """Format a byte count with a binary (power-of-1024) unit suffix.

    bytes2human(10000) -> '9.8K'
    bytes2human(100001221) -> '95.4M'
    """
    # http://code.activestate.com/recipes/578019
    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Walk from the largest unit (2**80) down to the smallest (2**10).
    for power in range(len(units), 0, -1):
        factor = 1 << (power * 10)
        if n >= factor:
            return '%.1f%s' % (float(n) / factor, units[power - 1])
    return "%sB" % n
def pprint_ntuple(nt, result):
    """Write one "Field      :   value" line per field of *nt* to *result*.

    Every field except 'percent' is rendered through bytes2human().
    """
    for field in nt._fields:
        raw = getattr(nt, field)
        shown = raw if field == 'percent' else bytes2human(raw)
        result.write('%-10s : %7s' % (field.capitalize(), shown))
        result.write('\n')
def process(param):
    """Build a MapPack whose "result" entry is a text report of the host's
    virtual-memory and swap usage, as collected via psutil.

    :param param: request parameter, unused (kept for the handler signature)
    :return: MapPack with a single "result" string entry
    """
    result=StringIO()
    result.write('MEMORY\n------\n')
    pprint_ntuple(psutil.virtual_memory(), result)
    result.write('\nSWAP\n----\n')
    pprint_ntuple(psutil.swap_memory(), result)
    pack = MapPack()
    pack.putStr("result", result.getvalue())
    result.close()
    return pack
def main():
    # NOTE(review): `print_` is not defined in this module (the upstream
    # psutil example imports it from psutil._compat), and pprint_ntuple is
    # called here with one argument while its definition above takes two —
    # calling main() as-is would fail.  The __main__ guard below deliberately
    # uses process() instead.
    print_('MEMORY\n------')
    pprint_ntuple(psutil.virtual_memory())
    print_('\nSWAP\n----')
    pprint_ntuple(psutil.swap_memory())
if __name__ == '__main__':
    # main()
    # Standalone smoke run: build the report pack and print it (Python 2).
    pack = process(None)
    print pack
| 1,096 | 0 | 92 |
c5cb97262302fa28b889b7552bd9a4407fe3ba4c | 1,041 | py | Python | prediction_flow/pytorch/din.py | dydcfg/prediction-flow | 332068f521bba51acc8600fe72e36e92c331bef1 | [
"MIT"
] | 211 | 2019-08-02T23:04:40.000Z | 2022-03-18T06:36:25.000Z | prediction_flow/pytorch/din.py | dydcfg/prediction-flow | 332068f521bba51acc8600fe72e36e92c331bef1 | [
"MIT"
] | 18 | 2019-08-10T07:13:05.000Z | 2022-03-17T10:45:30.000Z | prediction_flow/pytorch/din.py | dydcfg/prediction-flow | 332068f521bba51acc8600fe72e36e92c331bef1 | [
"MIT"
] | 51 | 2019-08-02T23:04:41.000Z | 2021-12-24T02:48:58.000Z | """
Deep Interest Network.
"""
from .nn import Attention
from .interest_net import InterestNet
class DIN(InterestNet):
"""Deep Interest Network.
Parameters
----------
features : Features
attention_groups : list of AttentionGroup
num_classes : int
Number of classes.
embedding_size : int
Size of embedding.
hidden_layers : list
Size of hidden layers.
Example: [96, 32]
dnn_activation : str
Activation function of deep layers.
Example: relu
final_activation : str
Activation function of output.
dropout : float
Dropout rate.
"""
| 22.148936 | 62 | 0.643612 | """
Deep Interest Network.
"""
from .nn import Attention
from .interest_net import InterestNet
class DIN(InterestNet):
    """Deep Interest Network.
    Parameters
    ----------
    features : Features
    attention_groups : list of AttentionGroup
    num_classes : int
        Number of classes.
    embedding_size : int
        Size of embedding.
    hidden_layers : list
        Size of hidden layers.
        Example: [96, 32]
    dnn_activation : str
        Activation function of deep layers.
        Example: relu
    final_activation : str
        Activation function of output.
    dropout : float
        Dropout rate.
    """
    # Construction is fully delegated to InterestNet; DIN only customizes
    # how the attention module is built.
    def __init__(self, *args, **kwargs):
        super(DIN, self).__init__(*args, **kwargs)
    def create_attention_fn(self, attention_group):
        """Build the Attention module for one attention group.

        Input width is pairs_count * embedding_size (the concatenated
        query/key embeddings of the group).
        """
        return Attention(
            attention_group.pairs_count * self.embedding_size,
            hidden_layers=attention_group.hidden_layers,
            dropout=attention_group.att_dropout,
            activation=attention_group.activation)
| 338 | 0 | 53 |
bbd2395815327f3c1981d9835eba367033befe97 | 6,076 | py | Python | call_scripts/reorder/max_bipartitle_match.py | valexsyu/Reorder_nat | c9ed2d55164e7cccd225def867a71b1a1a45a3f4 | [
"MIT"
] | null | null | null | call_scripts/reorder/max_bipartitle_match.py | valexsyu/Reorder_nat | c9ed2d55164e7cccd225def867a71b1a1a45a3f4 | [
"MIT"
] | null | null | null | call_scripts/reorder/max_bipartitle_match.py | valexsyu/Reorder_nat | c9ed2d55164e7cccd225def867a71b1a1a45a3f4 | [
"MIT"
] | null | null | null | # coding=utf-8
import os
from tqdm import tqdm
import networkx as nx
from networkx.algorithms import bipartite
import argparse
import torch
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
"""
# maximal Bipartite matching.
# python program to find
if __name__ == "__main__":
main()
| 37.04878 | 143 | 0.60813 | # coding=utf-8
import os
from tqdm import tqdm
import networkx as nx
from networkx.algorithms import bipartite
import argparse
import torch
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
"""
# maximal Bipartite matching.
# python program to find
def read_data(data_type, root_path, src_lang, tgt_lang):
    """Read the alignment, source and target files for one data split.

    :param data_type: split name, e.g. "train", "valid" or "test"
    :param root_path: directory containing the data files
    :param src_lang: source-language suffix, e.g. "de"
    :param tgt_lang: target-language suffix, e.g. "en"
    :return: tuple (align_lines, src_lines, tgt_lines) of raw file lines
    """
    # Build the alignment filename from the language pair instead of
    # hard-coding "de-en": identical behavior for the de/en caller in main(),
    # but other language pairs now work too.
    align_data_path = os.path.join(root_path, data_type + ".align." + src_lang + "-" + tgt_lang)
    with open(align_data_path, encoding="utf-8") as f:
        align_lines = f.readlines()
    src_data_path = os.path.join(root_path, data_type + "." + src_lang)
    with open(src_data_path, encoding="utf-8") as f:
        src_lines = f.readlines()
    tgt_data_path = os.path.join(root_path, data_type + "." + tgt_lang)
    with open(tgt_data_path, encoding="utf-8") as f:
        tgt_lines = f.readlines()
    return align_lines, src_lines, tgt_lines
def reorder(graph,src_line, len_src, len_tgt, components):
    """Reorder *src_line* so its tokens follow the target-side order implied
    by the alignment graph.

    For each connected component a maximum bipartite matching pairs source
    ("s<i>") with target ("t<j>") nodes; each matched source token is moved
    to its target position.  Unmatched tokens are re-inserted after their
    left neighbour, and (when the target is longer) positions beyond the
    source length are dropped again.

    :param graph: bipartite networkx graph of s-/t-nodes
    :param src_line: list of source tokens
    :param len_src: len(src_line)
    :param len_tgt: number of target tokens
    :param components: connected components of *graph*
    :return: list of source tokens in target-like order (same length)
    """
    src_data_reordered = src_line.copy()
    src_idx_remain = list(range(len_src))
    # Work array is sized by the longer side so target positions always fit.
    if len_src >= len_tgt :
        src_idx_reordered = src_idx_remain.copy()
        src_replace_table = [False]*len_src
    else:
        src_idx_reordered = list(range(len_tgt))
        src_replace_table = [False]*len_tgt
    for com_nodes in components :
        graph_sub = graph.subgraph(com_nodes)
        left_nodes, right_nodes = nx.bipartite.sets(graph_sub)
        left_nodes = list(left_nodes)
        right_nodes = list(right_nodes)
        # Ensure left_nodes holds the source ("s...") side.
        if left_nodes[0].find('t') >= 0:
            left_nodes, right_nodes = right_nodes, left_nodes
        #if len(left_nodes) != len(right_nodes):
            #pos = nx.bipartite_layout(graph_sub, left_nodes)
            #nx.draw(graph_sub, pos=pos, with_labels=True)
            #plt.savefig("test.png")
        # Matching items appear twice (s->t and t->s); only half are used.
        bip_match = list(nx.bipartite.maximum_matching(graph_sub).items())
        # NOTE(review): both branches below are identical — the len_src/len_tgt
        # split appears redundant here; confirm before merging them.
        if len_src >= len_tgt :
            for i in range(int(len(bip_match)/2)):
                t=0
                # Normalize pair orientation so index t is the source node.
                if bip_match[i][0].find('t') >= 0:
                    t = 1
                src_index = int(bip_match[i][t].replace("s",""))
                tgt_index = int(bip_match[i][t-1].replace("t",""))
                if not src_replace_table[src_index]:
                    # Vacate the old slot ('x' marker) and claim the target slot.
                    src_idx_reordered[src_index] = "x"
                    src_idx_reordered[tgt_index] = src_index
                    src_replace_table[tgt_index] = True
                    src_idx_remain.remove(src_index)
        else:
            for i in range(int(len(bip_match)/2)):
                t=0
                if bip_match[i][0].find('t') >= 0:
                    t = 1
                src_index = int(bip_match[i][t].replace("s",""))
                tgt_index = int(bip_match[i][t-1].replace("t",""))
                if not src_replace_table[src_index]:
                    src_idx_reordered[src_index] = "x"
                    src_idx_reordered[tgt_index] = src_index
                    src_replace_table[tgt_index] = True
                    src_idx_remain.remove(src_index)
    # Drop the 'x' placeholders left behind by moved tokens.
    if 'x' in src_idx_reordered :
        src_idx_reordered = list(filter(lambda x: x!='x', src_idx_reordered))
    # Re-insert unmatched source tokens right after their left neighbour.
    for remain in src_idx_remain:
        if not remain in src_idx_reordered :
            if remain > 0 :
                src_idx_reordered.insert(src_idx_reordered.index(remain-1)+1,remain)
            else:
                src_idx_reordered.insert(0,remain)
    # Target longer than source: discard padding indices beyond the source.
    if len_src < len_tgt :
        src_idx_reordered = list(filter(lambda x: x<len_src, src_idx_reordered))
    if len(src_idx_reordered) != len_src:
        # NOTE(review): debugger breakpoint left in production path.
        print("ERROR")
        import pdb;pdb.set_trace()
    for i in range(len_src):
        src_data_reordered[i] = src_line[src_idx_reordered[i]]
    return src_data_reordered
def build_graph(align_line):
    """Build a bipartite graph from "src-tgt" word-alignment pairs.

    Source positions become "s<i>" nodes (bipartite=0), target positions
    become "t<j>" nodes (bipartite=1), with one edge per alignment pair.
    """
    if not align_line:
        print("Error:align_line is empty")
    src_nodes = []
    tgt_nodes = []
    edges = []
    for pair in align_line:
        parts = pair.split("-")
        snode = "s" + str(parts[0])
        tnode = "t" + str(parts[1])
        src_nodes.append(snode)
        tgt_nodes.append(tnode)
        edges.append((snode, tnode))
    graph = nx.Graph()
    graph.add_nodes_from(src_nodes, bipartite=0)
    graph.add_nodes_from(tgt_nodes, bipartite=1)
    graph.add_edges_from(edges)
    return graph
def main():
    """Read one data split, reorder every source sentence to follow the
    target order implied by its word alignments, and write the result to
    <root>/<split>.<src_lang>.reorder (one sentence per line).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--root-path', type=str, default="examples/translation/data_distill_reorder", help='path or name of the reorder data')
    parser.add_argument('--data-type', type=str, default="train" , help='type : train or test or valid')
    args = parser.parse_args()
    #root_path = "examples/data/iwslt14.tokenized.de-en/"
    root_path = args.root_path
    output_name = "reorder"
    # Language pair is fixed here; read_data/file naming uses these suffixes.
    src_lang = "de"
    tgt_lang = "en"
    data_types = [args.data_type]
    output_data = []
    for data_type in data_types:
        align_lines, src_lines, tgt_lines = read_data(data_type,root_path, src_lang, tgt_lang)
        for line_id, align_line in tqdm(enumerate(align_lines), desc='processing', total=len(align_lines)):
            # Each line: space-separated "i-j" alignment pairs / tokens.
            align_line = align_line.strip("\n").split(" ")
            src_line = src_lines[line_id].strip("\n").split(" ")
            tgt_line = tgt_lines[line_id].strip("\n").split(" ")
            len_src = len(src_line)
            len_tgt = len(tgt_line)
            #build graph
            graph = build_graph(align_line)
            #find connected component
            components = sorted(nx.connected_components(graph), key=len, reverse=True)
            src_reordered = reorder(graph, src_line, len_src, len_tgt, components)
            temp = (' '.join([str(elem) for elem in src_reordered])) + "\n"
            output_data.append(temp)
    #write to output path
    # NOTE(review): relies on the loop variable `data_type` after the loop;
    # fine while data_types has exactly one entry.
    output_path = root_path + "/" + data_type + "." + src_lang + "." + output_name
    with open(output_path, 'w', encoding="utf-8")as f:
        for data in output_data:
            f.write(data)
if __name__ == "__main__":
main()
| 5,662 | 0 | 92 |
5c6879885153d1e3967897784932fee2096b958e | 6,516 | py | Python | IOLinkAnalyzer.py | hboshnak/saleae-hla-io-link | 5a4f802c7b50d883cd6f4841c7825fd59478e216 | [
"MIT"
] | null | null | null | IOLinkAnalyzer.py | hboshnak/saleae-hla-io-link | 5a4f802c7b50d883cd6f4841c7825fd59478e216 | [
"MIT"
] | null | null | null | IOLinkAnalyzer.py | hboshnak/saleae-hla-io-link | 5a4f802c7b50d883cd6f4841c7825fd59478e216 | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: MIT
#
# The MIT License (MIT)
#
# Copyright (c) <2021> Hottinger Brüel & Kjaer GmbH, Im Tiefen See 45, 64293 Darmstadt, Germany
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# High Level Analyzer
# For more information and documentation, please go to https://support.saleae.com/extensions/high-level-analyzer-extensions
from saleae.analyzers import HighLevelAnalyzer, AnalyzerFrame, StringSetting, NumberSetting, ChoicesSetting
import IOLinkFrame
import DirectparameterPage
type1_frames = {
# (pd, od)
'Type_1_1': (2, 0),
'Type_1_2': (0, 2),
'Type_1_V (8 OD)': (0, 8),
'Type_1_V (32 OD)': (0, 32),
}
type2_frames = {
# (pdout, od, pdin)
'Type_2_1': (0, 1, 1),
'Type_2_2': (0, 1, 2),
'Type_2_3': (1, 1, 0),
'Type_2_4': (2, 1, 0),
'Type_2_5': (1, 1, 1),
'Type_2_6': (2, 1, 2),
'Type_2_V': (0, 0, 0),
}
# High level analyzers must subclass the HighLevelAnalyzer class.
| 41.503185 | 131 | 0.604052 | # SPDX-License-Identifier: MIT
#
# The MIT License (MIT)
#
# Copyright (c) <2021> Hottinger Brüel & Kjaer GmbH, Im Tiefen See 45, 64293 Darmstadt, Germany
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# High Level Analyzer
# For more information and documentation, please go to https://support.saleae.com/extensions/high-level-analyzer-extensions
from saleae.analyzers import HighLevelAnalyzer, AnalyzerFrame, StringSetting, NumberSetting, ChoicesSetting
import IOLinkFrame
import DirectparameterPage
type1_frames = {
# (pd, od)
'Type_1_1': (2, 0),
'Type_1_2': (0, 2),
'Type_1_V (8 OD)': (0, 8),
'Type_1_V (32 OD)': (0, 32),
}
type2_frames = {
# (pdout, od, pdin)
'Type_2_1': (0, 1, 1),
'Type_2_2': (0, 1, 2),
'Type_2_3': (1, 1, 0),
'Type_2_4': (2, 1, 0),
'Type_2_5': (1, 1, 1),
'Type_2_6': (2, 1, 2),
'Type_2_V': (0, 0, 0),
}
# High level analyzers must subclass the HighLevelAnalyzer class.
class IOLinkAnalyzer(HighLevelAnalyzer):
    """Saleae high-level analyzer that reassembles IO-Link M-sequence frames
    (Type 0 / Type 1 / Type 2) from the stream of decoded UART byte frames.
    """
    # Frame Subtype Settings
    type1_frame = ChoicesSetting(label = 'Type_1', choices = type1_frames.keys())
    type2_frame = ChoicesSetting(label = 'Type_2', choices = type2_frames.keys())
    pdout_len = NumberSetting(label='PDout length (only with Type_2_V)', min_value=0.0, max_value=32.0)
    od_len = NumberSetting(label='OD length (only with Type_2_V)', min_value=0.0, max_value=32.0)
    pdin_len = NumberSetting(label='PDin length (only with Type_2_V)', min_value=0.0, max_value=32.0)
    # For each M-Sequence type a formatting string is provided. Type_2_V formatting is generated in __init__
    result_types = {
        'Type_0': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} OD: {{data.OD}}'
        },
        'Type_1_1': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} PD: {{data.PD}}'
        },
        'Type_1_2': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} OD: {{data.OD}}'
        },
        'Type_1_V': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} OD: {{data.OD}}'
        },
        'Type_2_1': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} OD: {{data.OD}} PDin: {{data.PDin}}'
        },
        'Type_2_2': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} OD: {{data.OD}} PDin: {{data.PDin}}'
        },
        'Type_2_3': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} PDout: {{data.PDout}} OD: {{data.OD}}'
        },
        'Type_2_4': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} PDout: {{data.PDout}} OD: {{data.OD}}'
        },
        'Type_2_5': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} PDout: {{data.PDout}} OD: {{data.OD}} PDin: {{data.PDin}}'
        },
        'Type_2_6': {
            'format': '{{data.Direction}} {{data.Channel}} {{data.Addr}} PDout: {{data.PDout}} OD: {{data.OD}} PDin: {{data.PDin}}'
        },
    }
    def __init__(self):
        # Start the frame-assembly coroutine and advance it to its first yield.
        self.process = self.parseByte()
        next(self.process)
        # Build the Type_2_V display format from the configured PDout/OD/PDin
        # lengths.  NOTE(review): `str` shadows the builtin here.
        str = '{{data.Direction}} {{data.Channel}} {{data.Addr}}'
        if self.pdout_len > 0:
            str += ' PDout: {{data.PDout}}'
        if self.od_len > 0:
            str += ' OD: {{data.OD}}'
        if self.pdin_len > 0:
            str += ' PDin: {{data.PDin}}'
        self.result_types['Type_2_V'] = {'format': str}
        # Register the user-configured lengths for Type_2_V lookups.
        type2_frames['Type_2_V'] = (self.pdout_len, self.od_len, self.pdin_len)
    def __del__(self):
        # Close the coroutine so its frame state is released.
        self.process.close()
    def initFrame(self, frames):
        """Create an IO-Link frame object from the first two UART frames.

        Raises ValueError when the two frames are too far apart in time or
        the M-sequence type (top two bits of the second byte) is unknown.
        """
        frame0, frame1 = frames[0], frames[1]
        if (frame1.start_time - frame0.end_time > (frame0.end_time - frame0.start_time) / 10.5): # frames too far apart
            raise ValueError
        frametype = frame1.data["data"][0] >> 6
        if frametype == 0:
            return IOLinkFrame.IOLinkFrameType0(frame0, frame1)
        if frametype == 1:
            # NOTE(review): `len` shadows the builtin in both branches below.
            len = type1_frames[self.type1_frame]
            return IOLinkFrame.IOLinkFrameType1(self.type1_frame[:8], len, frame0, frame1)
        if frametype == 2:
            len = type2_frames[self.type2_frame]
            return IOLinkFrame.IOLinkFrameType2(self.type2_frame, len, frame0, frame1)
        raise ValueError
    def parseByte(self):
        """Coroutine: receives UART frames via send(), yields a completed (or
        error-tagged) IO-Link frame whenever one is fully assembled, and
        yields None while a frame is still being collected.
        """
        startframes = ((yield), (yield)) # get 2 start frames
        while True:
            try:
                pendingFrame = self.initFrame(startframes)
            except ValueError:
                startframes = (startframes[1], (yield)) # keep 1 start frame, get 2nd frame
                continue
            while True:
                nextUARTframe = (yield)
                if pendingFrame.canappend(nextUARTframe) is False:
                    pendingFrame.data['error'] = 'Timing/Framing'
                    startframes = (nextUARTframe, (yield pendingFrame)) # keep 1 frame, get 2nd frame, return incomplete frame
                    break
                if pendingFrame.append(nextUARTframe) is True:
                    startframes = ((yield pendingFrame), (yield)) # get 2 new start frames
                    break
    def decode(self, frame: AnalyzerFrame):
        """Feed one UART analyzer frame into the coroutine; return the
        assembled IO-Link frame when one completes, else None.
        """
        if frame.type != 'data':
            return
        if "error" in frame.data:
            return
        ret = self.process.send(frame)
        # Page-channel frames additionally get printed via the
        # direct-parameter page decoder.
        if ret is not None and ret.data['Channel'] == 'Page':
            DirectparameterPage.printFrame(ret)
        return ret
| 2,330 | 2,184 | 22 |
95ead18961640ac7ba0d061bfbd23e70072af4c1 | 1,555 | py | Python | utils/handle_wordpress.py | kcrt/pages | 304ad8245f19c51c9138ef3e2b1a362c880c67d2 | [
"MIT"
] | null | null | null | utils/handle_wordpress.py | kcrt/pages | 304ad8245f19c51c9138ef3e2b1a362c880c67d2 | [
"MIT"
] | null | null | null | utils/handle_wordpress.py | kcrt/pages | 304ad8245f19c51c9138ef3e2b1a362c880c67d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pathlib
from typing import Set
import frontmatter
import datetime
import re
import urllib.request
import os
if __name__ == "__main__":
main()
| 28.272727 | 77 | 0.574277 | #!/usr/bin/env python3
import pathlib
from typing import Set
import frontmatter
import datetime
import re
import urllib.request
import os
def download_file(url, datestr):
    """Return the site-local image path used to replace *url* in a post.

    The actual download is intentionally disabled (see the commented-out
    urlretrieve call); only the placeholder path is produced.  The unused
    `filename = os.path.basename(url)` local from the original was removed.

    :param url: remote image URL referenced by the blog post
    :param datestr: post date as "YYYY-MM-DD", used to prefix the filename
    :return: path string of the form "/images/<datestr>_(unknown)"
    """
    print(f"Downloading {url}...")
    # skip urllib.request.urlretrieve(url, f"../images/{datestr}_(unknown)")
    return f"/images/{datestr}_(unknown)"
def main():
    """Convert exported WordPress posts in the current directory into Jekyll
    _posts entries: strip WordPress-specific frontmatter keys, fold
    categories into tags, rewrite blog image URLs to local /images paths,
    and dump each post as a date-named HTML file under ../_posts/.
    """
    # Matches image URLs hosted on the old blog (case covers jpg/png variants).
    blogimg_re = r'(https://blog.kcrt.net/files/.*\.(jpg|png|jpeg|JPG|PNG))'
    for file in pathlib.Path(".").glob("*"):
        if file.stem == "process":
            continue
        print(file)
        lines = file.read_text()
        post = frontmatter.loads(lines)
        # Drop WordPress-only frontmatter fields.
        del post["author"]
        del post["meta"]
        del post["parent_id"]
        del post["password"]
        del post["permalink"]
        del post["published"]
        del post["status"]
        # Tag every migrated post, then merge its categories into tags.
        post["tags"].append("旧日記")
        for cat in post["categories"]:
            post["tags"].append(cat)
        del post["categories"]
        # post["layout"] = "olddiary"
        del post["layout"]
        # re.findall returns (full_url, extension) tuples; keep the full URL.
        urls = [m[0] for m in re.findall(blogimg_re, post.content)]
        datestr = post["date"].strftime("%Y-%m-%d")
        for url in urls:
            filename = download_file(url, datestr)
            post.content = post.content.replace(
                url, filename)
        filename = "../_posts/" + \
            post["date"].strftime("%Y-%m-%d-%H%M%S") + ".html"
        frontmatter.dump(post, filename)
        # print(frontmatter.dumps(post))
# print(frontmatter.dumps(post))
if __name__ == "__main__":
main()
| 1,335 | 0 | 46 |
385be9d91500d832e323a6a1573cfce8f7a10920 | 2,151 | py | Python | meta_ultra/modules/humann2.py | dcdanko/MetaUltra | 600df03dd4aaf95268bf0062de2ad3d9ac480842 | [
"MIT"
] | null | null | null | meta_ultra/modules/humann2.py | dcdanko/MetaUltra | 600df03dd4aaf95268bf0062de2ad3d9ac480842 | [
"MIT"
] | null | null | null | meta_ultra/modules/humann2.py | dcdanko/MetaUltra | 600df03dd4aaf95268bf0062de2ad3d9ac480842 | [
"MIT"
] | 1 | 2018-11-02T14:14:38.000Z | 2018-11-02T14:14:38.000Z | import meta_ultra.config as config
from meta_ultra.utils import *
from meta_ultra.modules import *
from meta_ultra.data_type import *
modules.append(Humann2Module)
| 29.875 | 70 | 0.635053 | import meta_ultra.config as config
from meta_ultra.utils import *
from meta_ultra.modules import *
from meta_ultra.data_type import *
class Humann2Module( Module):
    """Pipeline module describing the HUMAnN2 tool and its configuration
    fields (executable, reference DB, and thread/time/RAM resources for both
    the embedded DIAMOND step and HUMAnN2 itself).
    """
    def __init__(self, **kwargs):
        super(Humann2Module, self).__init__(**kwargs)
        # Resource defaults (hours / GB) unless overridden by saved params.
        self.time = self.getParamOrDefault('time', 1)
        self.ram = self.getParamOrDefault('ram', 4)
        self.dmnd_time = self.getParamOrDefault('dmnd_time', 1)
        self.dmnd_ram = self.getParamOrDefault('dmnd_ram',5)
    def buildConf(self, confBuilder):
        """Register the HUMANN2 section and its interactive fields on
        *confBuilder*; no-op when the module is not enabled (addModule
        returns a falsy value).
        """
        humann2 = confBuilder.addModule(self.moduleName().upper())
        if not humann2:
            return
        humann2.add_field('EXC',
                  UserChoice('Humann2',
                         self.tools,
                         new=lambda :self.askUserForTool('humann2')
                         ))
        humann2.add_field('DB',
                  UserChoice('Humann2 Reference',
                         self.refs,
                         new=lambda : self.askUserForRef('humann2_diamond_db')
                         ))
        # Diamond
        humann2.add_field('DMND_THREADS',
                  UserInput('\tHow many threads would you like for humann2',
                        2*int(confBuilder.get_global_field('THREADS')),
                        type=int,
                        fineControlOnly=True))
        humann2.add_field('DMND_TIME',
                  UserInput('\tHow many hours does diamond (in humann2) need',
                        self.dmnd_time,
                        type=int,
                        fineControlOnly=True))
        humann2.add_field('DMND_RAM',
                  UserInput('\tHow many GB of RAM does humann2 need per thread',
                        self.dmnd_ram,
                        type=int,
                        fineControlOnly=True))
        # humann2
        humann2.add_field('THREADS',
                  UserInput('\tHow many threads would you like for humann2',
                        confBuilder.get_global_field('THREADS'),
                        type=int,
                        fineControlOnly=True))
        humann2.add_field('TIME',
                  UserInput('\tHow many hours does humann2 need',
                        self.time,
                        type=int,
                        fineControlOnly=True))
        humann2.add_field('RAM',
                  UserInput('\tHow many GB of RAM does humann2 need per thread',
                        self.ram,
                        type=int,
                        fineControlOnly=True))
    @staticmethod
    def moduleName():
        # Registry key for this module (see modules.append below).
        return 'humann2'
modules.append(Humann2Module)
| 1,863 | 98 | 23 |
117c6caed0886eec8c0aac5561ccec9deb809112 | 577 | py | Python | run.py | littlebai3618/bproxypool | 358cf5c14164dadbd0d9bdf7cc3932b46ec81812 | [
"MIT"
] | 2 | 2019-12-26T02:49:59.000Z | 2020-02-11T09:22:07.000Z | run.py | littlebai3618/bproxypool | 358cf5c14164dadbd0d9bdf7cc3932b46ec81812 | [
"MIT"
] | 3 | 2021-03-31T19:33:59.000Z | 2021-12-13T20:29:21.000Z | run.py | littlebai3618/bproxypool | 358cf5c14164dadbd0d9bdf7cc3932b46ec81812 | [
"MIT"
] | 2 | 2020-01-25T12:05:47.000Z | 2020-07-19T02:57:12.000Z | import sys
import traceback
from bproxypool.scheduler import run
from bproxypool.server import create_app
from bproxypool.utils.notify import ding
app = create_app()
if __name__ == '__main__':
# app.run(debug=True)
if len(sys.argv) == 2:
if sys.argv[1] == 'scheduler':
try:
run()
except Exception as e:
tp, msg, tb = sys.exc_info()
e_msg = '>'.join(traceback.format_exception(tp, msg, tb))
ding(f'> ProxyPoolError: \n{e_msg}', 'ProxyPoolError')
raise e
| 26.227273 | 73 | 0.577123 | import sys
import traceback
from bproxypool.scheduler import run
from bproxypool.server import create_app
from bproxypool.utils.notify import ding
app = create_app()
if __name__ == '__main__':
    # app.run(debug=True)
    # "python run.py scheduler" starts the proxy-pool scheduler; any crash is
    # reported via DingTalk (ding) with a full traceback before re-raising.
    if len(sys.argv) == 2:
        if sys.argv[1] == 'scheduler':
            try:
                run()
            except Exception as e:
                tp, msg, tb = sys.exc_info()
                e_msg = '>'.join(traceback.format_exception(tp, msg, tb))
                ding(f'> ProxyPoolError: \n{e_msg}', 'ProxyPoolError')
                raise e
| 0 | 0 | 0 |
d9aa8299a5b6d0e871446d942b4e7815a43a9371 | 1,404 | py | Python | fable/fable_sources/libtbx/tst_easy_pickle.py | hickerson/bbn | 17ef63ad1717553ab2abb50592f8de79228c8523 | [
"MIT"
] | 4 | 2016-09-30T15:03:39.000Z | 2021-03-25T13:27:08.000Z | fable/fable_sources/libtbx/tst_easy_pickle.py | hickerson/bbn | 17ef63ad1717553ab2abb50592f8de79228c8523 | [
"MIT"
] | 1 | 2018-04-18T14:41:18.000Z | 2018-04-20T19:33:52.000Z | fable/fable_sources/libtbx/tst_easy_pickle.py | hickerson/bbn | 17ef63ad1717553ab2abb50592f8de79228c8523 | [
"MIT"
] | 3 | 2016-04-19T18:20:30.000Z | 2019-04-03T14:54:29.000Z | from __future__ import division
if (__name__ == "__main__"):
import sys
run(args=sys.argv[1:])
| 26 | 64 | 0.614672 | from __future__ import division
def exercise(n, use_dumps=False):
  """Time easy_pickle round-trips of a list of n [i, i] pairs (Python 2).

  With use_dumps=True an in-memory dumps/loads pass is timed once;
  otherwise dump/load is timed for both plain and .gz files, including the
  non-buffered load path.
  """
  from libtbx import easy_pickle
  import time
  obj = []
  for i in xrange(n):
    obj.append([i,i])
  for dgz in ["", ".gz"]:
    t0 = time.time()
    if (use_dumps):
      print "dumps/loads"
      pickle_string = easy_pickle.dumps(obj=obj)
    else:
      file_name = "test.dat"+dgz
      print file_name
      easy_pickle.dump(file_name=file_name, obj=obj)
    print " dump: %.2f s" % (time.time()-t0)
    # Drop the in-memory object so the load below measures a fresh rebuild.
    del obj
    t0 = time.time()
    if (use_dumps):
      obj = easy_pickle.loads(pickle_string)
    else:
      obj = easy_pickle.load(file_name=file_name)
    print " load buffered: %.2f s" % (time.time()-t0)
    if (use_dumps):
      break
    else:
      del obj
      t0 = time.time()
      # Second load uses the slower, lower-memory code path.
      obj = easy_pickle.load(
        file_name=file_name, faster_but_using_more_memory=False)
      print " load direct: %.2f s" % (time.time()-t0)
  # XXX pickling and unpickling of libtbx.Auto
  # couldn't think of a better place to test this...
  from libtbx import Auto
  a = easy_pickle.dumps(Auto)
  b = easy_pickle.loads(a)
  assert (b is Auto) and (b == Auto)
def run(args):
  """CLI entry point: run exercise() in both modes.

  args may contain at most one element, the list size n (default 100).
  """
  assert len(args) in [0,1]
  if (len(args) == 0):
    n = 100
  else:
    n = int(args[0])
  assert n >= 0
  for use_dumps in [False, True]:
    exercise(n, use_dumps)
  print "OK"
if (__name__ == "__main__"):
import sys
run(args=sys.argv[1:])
| 1,259 | 0 | 45 |
3cca83b19bf8c9f61ae5e1053ec7aadeeecbdd45 | 1,104 | py | Python | strategypy/bots/happiness.py | davide-ceretti/strategypy | 37df9569e3a9fc8a0f1487a29a7897db6363c42e | [
"MIT"
] | 8 | 2015-03-03T17:40:41.000Z | 2020-11-08T19:02:23.000Z | strategypy/bots/happiness.py | davide-ceretti/strategypy | 37df9569e3a9fc8a0f1487a29a7897db6363c42e | [
"MIT"
] | 19 | 2015-01-14T12:07:05.000Z | 2015-03-19T11:53:11.000Z | strategypy/bots/happiness.py | davide-ceretti/strategypy | 37df9569e3a9fc8a0f1487a29a7897db6363c42e | [
"MIT"
] | 6 | 2015-03-16T18:17:06.000Z | 2021-11-04T23:44:47.000Z | import random
import sys
from copy import deepcopy
from .happines_base import Bot as HappinessBaseBot
"""
...........
.....3.....
....323....
...32123...
..321H123..
...32X23...
....323....
.....3.....
...........
...........
...........
"""
bot = Bot()
| 20.072727 | 74 | 0.569746 | import random
import sys
from copy import deepcopy
from .happines_base import Bot as HappinessBaseBot
"""
...........
.....3.....
....323....
...32123...
..321H123..
...32X23...
....323....
.....3.....
...........
...........
...........
"""
class Bot(HappinessBaseBot):
    """Happiness bot: scores a cell by how densely the surrounding shells
    are occupied — friends at full weight, enemies at half weight."""

    def calc_happiness(self, friend_dist, enemy_dist):
        """Return the happiness score of a cell.

        friend_dist/enemy_dist map a shell distance to the unit count on
        that shell; index 0 is the cell itself, which must be empty.
        """
        # An occupied cell scores nothing.
        if friend_dist[0] or enemy_dist[0]:
            return 0.
        score = 0.
        # Friends first (weight 1.0), then enemies (weight 0.5), matching
        # the original summation order exactly.
        for dist_table, weight in ((friend_dist, 1.0), (enemy_dist, 0.5)):
            for radius in range(1, 100):
                capacity = 4 * radius
                occupancy = (dist_table[radius] * 1. / capacity)
                score += weight * min(occupancy, 0.74) / radius
        return score
bot = Bot()
def action(ctx):
    # Module-level entry point used by the game engine; delegates to the
    # singleton `bot` instance created above.
    return bot.action(ctx)
| 763 | 7 | 73 |
07573237f0ac1e8c2409a2a818e87a59070a1fae | 5,035 | py | Python | deployment/clusterCmd.py | NunoEdgarGFlowHub/pai | 0eb286f029d78865c24feb6aabcc990c44336c11 | [
"MIT"
] | 1 | 2019-04-28T23:30:27.000Z | 2019-04-28T23:30:27.000Z | deployment/clusterCmd.py | 289202471/pai | 07177b18c5b780457d633be1691f34bfb7f64523 | [
"MIT"
] | null | null | null | deployment/clusterCmd.py | 289202471/pai | 07177b18c5b780457d633be1691f34bfb7f64523 | [
"MIT"
] | 1 | 2019-11-18T13:27:07.000Z | 2019-11-18T13:27:07.000Z | import os
import sys
import time
import logging
import logging.config
from k8sPaiLibrary.maintainlib import add as k8s_add
from k8sPaiLibrary.maintainlib import remove as k8s_remove
from k8sPaiLibrary.maintainlib import etcdfix as k8s_etcd_fix
from k8sPaiLibrary.maintainlib import kubectl_conf_check
from k8sPaiLibrary.maintainlib import kubectl_install
from k8sPaiLibrary.maintainlib import update as k8s_update
from k8sPaiLibrary.maintainlib import k8s_util
from clusterObjectModel.cluster_object_model import cluster_object_model
logger = logging.getLogger(__name__)
| 50.858586 | 139 | 0.581927 | import os
import sys
import time
import logging
import logging.config
from k8sPaiLibrary.maintainlib import add as k8s_add
from k8sPaiLibrary.maintainlib import remove as k8s_remove
from k8sPaiLibrary.maintainlib import etcdfix as k8s_etcd_fix
from k8sPaiLibrary.maintainlib import kubectl_conf_check
from k8sPaiLibrary.maintainlib import kubectl_install
from k8sPaiLibrary.maintainlib import update as k8s_update
from k8sPaiLibrary.maintainlib import k8s_util
from clusterObjectModel.cluster_object_model import cluster_object_model
logger = logging.getLogger(__name__)
class ClusterCmd():
    """Implements the ``paictl.py cluster ...`` sub-commands for bootstrapping,
    tearing down and configuring a PAI Kubernetes cluster."""
    def register(self, parser):
        """Attach the k8s-bootup / k8s-clean / k8s-set-env sub-commands to *parser*."""
        cluster_parser = parser.add_subparsers(help="cluster operations")
        # ./paictl.py cluster k8s-bootup ...
        bootup_parser = cluster_parser.add_parser("k8s-bootup")
        bootup_parser.add_argument("-p", "--config-path", dest="config_path", required=True, help="path of cluster configuration file")
        bootup_parser.set_defaults(handler=self.k8s_bootup)
        # ./paictl.py cluster k8s-clean ...
        clean_parser = cluster_parser.add_parser("k8s-clean")
        clean_parser.add_argument("-p", "--config-path", dest="config_path", required=True, help="path of cluster configuration file")
        clean_parser.add_argument("-f", "--force", dest="force", required=False, action="store_true", help="clean all the data forcefully")
        clean_parser.set_defaults(handler=self.k8s_clean)
        # ./paictl.py cluster k8s-set-env ...
        env_parser = cluster_parser.add_parser("k8s-set-env")
        env_parser.add_argument("-p", "--config-path", dest="config_path", help="path of cluster configuration file")
        env_parser.set_defaults(handler=self.k8s_set_environment)
    def k8s_bootup(self, args):
        """Deploy a fresh PAI Kubernetes cluster from the configuration at
        ``args.config_path``."""
        cluster_object_model_instance = cluster_object_model(args.config_path)
        com = cluster_object_model_instance.kubernetes_config()
        logger.info("Begin to initialize PAI k8s cluster.")
        k8s_util.maintain_cluster_k8s(com, option_name="deploy", clean=True)
        logger.info("Finish initializing PAI k8s cluster.")
    def k8s_clean(self, args):
        """Destroy the cluster after interactive Y/N confirmation; with
        ``--force`` the etcd data is wiped as well."""
        # just use 'k8s-clean' for testing temporarily.
        cluster_object_model_instance = cluster_object_model(args.config_path)
        com = cluster_object_model_instance.kubernetes_config()
        logger.warning("--------------------------------------------------------")
        logger.warning("--------------------------------------------------------")
        logger.warning("----------     Dangerous Operation!!!    ---------------")
        logger.warning("------     Your k8s Cluster will be destroyed    -------")
        logger.warning("------     PAI service on k8s will be stopped    -------")
        logger.warning("--------------------------------------------------------")
        if args.force:
            logger.warning("--------------------------------------------------------")
            logger.warning("----------    ETCD data will be cleaned.    ------------")
            logger.warning("-----    If you wanna keep pai's user data.    ---------")
            logger.warning("-----         Please backup etcd data.         ---------")
            logger.warning("-----      And restore it after k8s-bootup     ---------")
            logger.warning("---     And restore it before deploy pai service    ----")
            logger.warning("--------------------------------------------------------")
        logger.warning("--------------------------------------------------------")
        logger.warning("----    Please ensure you wanna do this operator, ------")
        logger.warning("-------        after knowing all risk above.     -------")
        logger.warning("--------------------------------------------------------")
        logger.warning("--------------------------------------------------------")
        count_input = 0
        while True:
            # NOTE(review): raw_input is Python 2 only — confirm this tool still
            # targets Python 2 before running it under Python 3.
            user_input = raw_input("Do you want to continue this operation? (Y/N) ")
            if user_input == "N":
                return
            elif user_input == "Y":
                break
            else:
                print(" Please type Y or N.")
                count_input = count_input + 1
                # Abort after three invalid answers.
                if count_input == 3:
                    logger.warning("3 Times......... Sorry,  we will force stopping your operation.")
                    return
        logger.info("Begin to clean up whole cluster.")
        k8s_util.maintain_cluster_k8s(com, option_name="clean", force=args.force, clean=True)
        logger.info("Clean up job finished")
    def k8s_set_environment(self, args):
        """Install and configure kubectl; with no config path given, install
        with defaults (com=None)."""
        if args.config_path != None:
            args.config_path = os.path.expanduser(args.config_path)
            cluster_object_model_instance = cluster_object_model(args.config_path)
            com = cluster_object_model_instance.kubernetes_config()
        else:
            com = None
        kubectl_install_worker = kubectl_install.kubectl_install(com)
        kubectl_install_worker.run()
| 4,332 | -2 | 130 |
bbabc0754df471ab973c778450033fe80a2750d6 | 158 | py | Python | Python Scripts/randomQuotes.py | lazydinoz/HackFest21 | 84bfbfbb2c75a6511226a87d2e947984db878ba1 | [
"MIT"
] | 1 | 2021-11-12T10:51:19.000Z | 2021-11-12T10:51:19.000Z | Python Scripts/randomQuotes.py | lazydinoz/HackFest21 | 84bfbfbb2c75a6511226a87d2e947984db878ba1 | [
"MIT"
] | null | null | null | Python Scripts/randomQuotes.py | lazydinoz/HackFest21 | 84bfbfbb2c75a6511226a87d2e947984db878ba1 | [
"MIT"
] | null | null | null | import requests
url = 'https://api.quotable.io/random'
r = requests.get(url)
quote = r.json()
print(quote['content'])
print(' -',quote['author']) | 19.75 | 39 | 0.626582 | import requests
# Fetch one random quote from the public Quotable API and print it.
url = 'https://api.quotable.io/random'
response = requests.get(url)
quote = response.json()
print(quote['content'])
print(' -',quote['author']) | 0 | 0 | 0 |
0cc6b3b162731a53a1206b334e0e21e3a7cabf83 | 131 | py | Python | ptvp3/__main__.py | timotheyca/persistence3 | 6a7094b0e84dfb5be47099a16717558e02329e00 | [
"BSD-3-Clause"
] | 1 | 2019-04-22T20:49:20.000Z | 2019-04-22T20:49:20.000Z | ptvp3/__main__.py | timotheyca/persistence3 | 6a7094b0e84dfb5be47099a16717558e02329e00 | [
"BSD-3-Clause"
] | 3 | 2019-04-22T19:52:45.000Z | 2019-09-13T17:53:48.000Z | ptvp3/__main__.py | timotheyca/persistence3 | 6a7094b0e84dfb5be47099a16717558e02329e00 | [
"BSD-3-Clause"
] | null | null | null | from ptvp3 import P3terpreter
import sys
import json
p3p = P3terpreter.from_param_dict(json.load(open(sys.argv[1])))
p3p.start()
| 16.375 | 63 | 0.778626 | from ptvp3 import P3terpreter
import sys
import json
# Load the interpreter parameters from the JSON file named on the command
# line.  A context manager closes the file deterministically — the original
# `json.load(open(...))` left the handle to the garbage collector.
with open(sys.argv[1]) as param_file:
    params = json.load(param_file)
p3p = P3terpreter.from_param_dict(params)
p3p.start()
| 0 | 0 | 0 |
093af4196ce072cdb8850441d877dd75d6976695 | 1,334 | py | Python | fea_sim.py | GuohongLi/simclr-pytorch | 7e08b2433a623fdbc1c097402fded4cc69d1b54e | [
"BSD-3-Clause"
] | null | null | null | fea_sim.py | GuohongLi/simclr-pytorch | 7e08b2433a623fdbc1c097402fded4cc69d1b54e | [
"BSD-3-Clause"
] | null | null | null | fea_sim.py | GuohongLi/simclr-pytorch | 7e08b2433a623fdbc1c097402fded4cc69d1b54e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
#
########################################################################
"""
File: feature.py
Author: zhenglinhai(zhenglinhai@baidu.com)
Date: 2018/04/23 11:40:14
"""
from scipy.spatial.distance import pdist
import time
import sys
import base64
import json
import numpy as np
import time
import os
import random
import cv2
if __name__ == '__main__':
imgfea_list = []
with open(sys.argv[1]) as fp:
for line in fp:
imgfea_list.append(line.strip().split('\t'))
max_len = len(imgfea_list)
index = 0
while True:
if index+1 >= max_len:
break
name1, fea2048_1, fea128_1 = imgfea_list[index][0], [float(x) for x in imgfea_list[index][1].split(' ')], [float(x) for x in imgfea_list[index][2].split(' ')]
name2, fea2048_2, fea128_2 = imgfea_list[index+1][0], [float(x) for x in imgfea_list[index+1][1].split(' ')], [float(x) for x in imgfea_list[index+1][2].split(' ')]
dist2048 = pdist(np.vstack([fea2048_1, fea2048_2]),'cosine')
dist128 = pdist(np.vstack([fea128_1, fea128_2]),'cosine')
print(name1, name2, dist2048, dist128)
index = index + 2
| 30.318182 | 172 | 0.567466 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
#
########################################################################
"""
File: feature.py
Author: zhenglinhai(zhenglinhai@baidu.com)
Date: 2018/04/23 11:40:14
"""
from scipy.spatial.distance import pdist
import time
import sys
import base64
import json
import numpy as np
import time
import os
import random
import cv2
if __name__ == '__main__':
    # Each input line: <name>\t<2048-d feature>\t<128-d feature>,
    # features space-separated.  Records are compared in consecutive pairs.
    with open(sys.argv[1]) as fp:
        records = [line.strip().split('\t') for line in fp]
    # Stride-2 walk over pairs; a trailing unpaired record is ignored.
    for idx in range(0, len(records) - 1, 2):
        rec_a, rec_b = records[idx], records[idx + 1]
        name1 = rec_a[0]
        fea2048_1 = list(map(float, rec_a[1].split(' ')))
        fea128_1 = list(map(float, rec_a[2].split(' ')))
        name2 = rec_b[0]
        fea2048_2 = list(map(float, rec_b[1].split(' ')))
        fea128_2 = list(map(float, rec_b[2].split(' ')))
        # Cosine distance between the two records, for both feature sizes.
        dist2048 = pdist(np.vstack([fea2048_1, fea2048_2]),'cosine')
        dist128 = pdist(np.vstack([fea128_1, fea128_2]),'cosine')
        print(name1, name2, dist2048, dist128)
| 0 | 0 | 0 |
4729ab2a639ac58292927dee77b2b1b6543805b8 | 1,372 | py | Python | AlgoExpert/binary_search_trees/reconstructBst.py | Muzque/Leetcode | d06365792c9ef48e0a290da00ba5e71f212554d5 | [
"MIT"
] | 1 | 2021-05-11T09:52:38.000Z | 2021-05-11T09:52:38.000Z | AlgoExpert/binary_search_trees/reconstructBst.py | Muzque/Leetcode | d06365792c9ef48e0a290da00ba5e71f212554d5 | [
"MIT"
] | null | null | null | AlgoExpert/binary_search_trees/reconstructBst.py | Muzque/Leetcode | d06365792c9ef48e0a290da00ba5e71f212554d5 | [
"MIT"
] | 1 | 2021-05-05T04:13:17.000Z | 2021-05-05T04:13:17.000Z | # This is an input class. Do not edit.
"""
def traversal(node, array):
if not array:
return
left, right = -1, -1
for i in range(len(array)):
if left == -1 and array[i] < node.value:
left = i
if right == -1 and array[i] >= node.value:
right = i
if left != -1:
node.left = BST(array[left])
bound = right if right != -1 else len(array)
traversal(node.left, array[left + 1:bound])
if right != -1:
node.right = BST(array[right])
traversal(node.right, array[right + 1:len(array)])
def reconstructBst(preOrderTraversalValues):
n = preOrderTraversalValues.pop(0)
root = BST(n)
traversal(root, preOrderTraversalValues)
return root
"""
| 28.583333 | 58 | 0.615889 | # This is an input class. Do not edit.
class BST:
    """Plain binary-search-tree node (the exercise's provided input class)."""
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
"""
def traversal(node, array):
if not array:
return
left, right = -1, -1
for i in range(len(array)):
if left == -1 and array[i] < node.value:
left = i
if right == -1 and array[i] >= node.value:
right = i
if left != -1:
node.left = BST(array[left])
bound = right if right != -1 else len(array)
traversal(node.left, array[left + 1:bound])
if right != -1:
node.right = BST(array[right])
traversal(node.right, array[right + 1:len(array)])
def reconstructBst(preOrderTraversalValues):
n = preOrderTraversalValues.pop(0)
root = BST(n)
traversal(root, preOrderTraversalValues)
return root
"""
def reconstructBst(preOrderTraversalValues):
    """Rebuild a BST from its pre-order traversal.

    The first value is the root; the rest splits into the left subtree
    (values < root) followed by the right subtree (values >= root).
    Returns None for an empty traversal.  O(n^2) worst case due to the
    per-call scan and slicing.
    """
    if not preOrderTraversalValues:
        return None
    root_value = preOrderTraversalValues[0]
    # Index of the first value belonging to the right subtree; defaults to
    # the end of the list when every remaining value is smaller than root.
    split = next(
        (i for i, v in enumerate(preOrderTraversalValues[1:], 1) if v >= root_value),
        len(preOrderTraversalValues),
    )
    left_subtree = reconstructBst(preOrderTraversalValues[1:split])
    right_subtree = reconstructBst(preOrderTraversalValues[split:])
    return BST(root_value, left_subtree, right_subtree)
| 557 | -11 | 71 |
717dbe2d825bab3fbda0b1024e170b79cd9586c8 | 1,691 | py | Python | tests/test_app_client.py | rhawiz/onesignal-python | 9d014e4b8a5b4c4d4c615bfb2bd37e126c793889 | [
"MIT"
] | 35 | 2016-11-19T10:59:40.000Z | 2022-01-19T06:52:19.000Z | tests/test_app_client.py | rhawiz/onesignal-python | 9d014e4b8a5b4c4d4c615bfb2bd37e126c793889 | [
"MIT"
] | 9 | 2017-05-24T12:22:15.000Z | 2021-12-17T16:14:21.000Z | tests/test_app_client.py | rhawiz/onesignal-python | 9d014e4b8a5b4c4d4c615bfb2bd37e126c793889 | [
"MIT"
] | 22 | 2017-03-08T14:48:51.000Z | 2022-01-17T12:14:49.000Z | import pytest
from onesignalclient.app_client import OneSignalAppClient
from requests.exceptions import HTTPError
from .base_test import BaseTest
| 37.577778 | 75 | 0.719101 | import pytest
from onesignalclient.app_client import OneSignalAppClient
from requests.exceptions import HTTPError
from .base_test import BaseTest
class TestAppClient(BaseTest):
    """Integration tests for OneSignalAppClient (app-mode REST API).

    Fixtures (app_id, app_api_key, app_client, device_notification,
    notification_id) are presumably supplied by BaseTest / conftest —
    TODO confirm against the test package.
    """
    def test_init_client(self, app_id, app_api_key):
        """Constructing with app credentials selects MODE_APP."""
        client = OneSignalAppClient(
            app_id=app_id, app_api_key=app_api_key
        )
        assert client.mode == OneSignalAppClient.MODE_APP
    def test_get_headers(self, app_client):
        """Headers carry content type and the API key in Authorization."""
        headers = app_client.get_headers()
        assert 'Content-Type' in headers
        assert 'Authorization' in headers
        assert app_client.app_api_key in headers['Authorization']
    def test_create_notification(self, app_client, device_notification):
        """Creating a notification returns an id and a recipient count."""
        result = app_client.create_notification(device_notification)
        assert result.get('id', False)
        assert result.get('recipients', False)
    def test_cancel_notification(self, app_client, notification_id):
        """Cancelling an existing notification reports success."""
        result = app_client.cancel_notification(notification_id)
        assert result.get('success', False)
    def test_failed_cancel_notification(self, app_client, notification_id):
        """Cancelling an unknown notification surfaces an HTTPError."""
        with pytest.raises(HTTPError):
            app_client.cancel_notification(notification_id)
    def test_csv_export(self, app_client):
        """CSV export returns a download URL."""
        csv_link = app_client.csv_export()
        assert csv_link.get('csv_file_url', False)
    def test_csv_export_with_extra_fields(self, app_client):
        """CSV export accepts optional extra field names."""
        csv_link = app_client.csv_export(
            extra_fields=['location', 'country', 'rooted'])
        assert csv_link.get('csv_file_url', False)
    def test_csv_export_not_found(self, app_client):
        """CSV export on a missing app surfaces an HTTPError."""
        with pytest.raises(HTTPError):
            app_client.csv_export()
| 1,297 | 9 | 238 |
11a960274da363ba993f403fe5953d2a280dac62 | 57 | py | Python | graph_adapter_tests/h_spark/__init__.py | jameslamb/hamilton | 49f11ede9a79478f31b8ded2673ec0f977033562 | [
"BSD-3-Clause-Clear"
] | 298 | 2021-10-14T20:56:45.000Z | 2022-03-30T12:55:05.000Z | graph_adapter_tests/h_spark/__init__.py | jameslamb/hamilton | 49f11ede9a79478f31b8ded2673ec0f977033562 | [
"BSD-3-Clause-Clear"
] | 58 | 2021-10-18T17:54:41.000Z | 2022-03-31T00:19:36.000Z | graph_adapter_tests/h_spark/__init__.py | jameslamb/hamilton | 49f11ede9a79478f31b8ded2673ec0f977033562 | [
"BSD-3-Clause-Clear"
] | 15 | 2021-10-30T17:45:59.000Z | 2022-03-09T14:55:53.000Z | __author__ = 'Stefan Krawczyk <stefank@cs.stanford.edu>'
| 28.5 | 56 | 0.77193 | __author__ = 'Stefan Krawczyk <stefank@cs.stanford.edu>'
| 0 | 0 | 0 |
04200d7cd9b433d985340a3dac35e036f626cf4f | 2,604 | py | Python | nlp_architect/models/most_common_word_sense.py | laugustyniak/nlp-architect | 7e9068657b575595a1f279eae59a172853cb4fe1 | [
"Apache-2.0"
] | null | null | null | nlp_architect/models/most_common_word_sense.py | laugustyniak/nlp-architect | 7e9068657b575595a1f279eae59a172853cb4fe1 | [
"Apache-2.0"
] | null | null | null | nlp_architect/models/most_common_word_sense.py | laugustyniak/nlp-architect | 7e9068657b575595a1f279eae59a172853cb4fe1 | [
"Apache-2.0"
] | 1 | 2019-12-30T14:42:54.000Z | 2019-12-30T14:42:54.000Z | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
from neon.callbacks import Callbacks
from neon.initializers import Gaussian
from neon.layers import Affine, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import SumSquared, Softmax, Rectlin
from neon.transforms import Misclassification
| 39.454545 | 89 | 0.65745 | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
from neon.callbacks import Callbacks
from neon.initializers import Gaussian
from neon.layers import Affine, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import SumSquared, Softmax, Rectlin
from neon.transforms import Misclassification
class MostCommonWordSense:
    """Small neon MLP (100 ReLU -> 2 softmax) used for most-common
    word-sense classification."""
    def __init__(self, rounding, callback_args, epochs):
        # setup weight initialization function
        self.init = Gaussian(loc=0.0, scale=0.01)
        # setup optimizer
        self.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9,
                                                 stochastic_round=rounding)
        # setup cost function — note this is sum-of-squared errors
        # (SumSquared), not cross entropy as the original comment claimed
        self.cost = GeneralizedCost(costfunc=SumSquared())
        self.epochs = epochs
        self.model = None
        self.callback_args = callback_args
    def build(self):
        """Construct the two-layer network and assign it to self.model."""
        # setup model layers
        layers = [Affine(nout=100, init=self.init, bias=self.init, activation=Rectlin()),
                  Affine(nout=2, init=self.init, bias=self.init, activation=Softmax())]
        # initialize model object
        self.model = Model(layers=layers)
    def fit(self, valid_set, train_set):
        """Train on *train_set*, evaluating callbacks against *valid_set*."""
        # configure callbacks
        callbacks = Callbacks(self.model, eval_set=valid_set, **self.callback_args)
        self.model.fit(train_set, optimizer=self.optimizer, num_epochs=self.epochs,
                       cost=self.cost, callbacks=callbacks)
    def save(self, save_path):
        """Serialize model weights to *save_path*."""
        self.model.save_params(save_path)
    def load(self, model_path):
        """Load a previously saved model from *model_path*."""
        self.model = Model(model_path)
    def eval(self, valid_set):
        """Return the misclassification rate on *valid_set*."""
        eval_rate = self.model.eval(valid_set, metric=Misclassification())
        return eval_rate
    def get_outputs(self, valid_set):
        """Return raw model outputs (class probabilities) for *valid_set*."""
        return self.model.get_outputs(valid_set)
| 1,330 | 5 | 212 |
468117e32a0214679932e30d2c427c8902f0282a | 1,325 | py | Python | pcc/websocket_writer.py | ACoolUsername9001/python-computercraft | 47c7a97dd013a75ee8e6f9e0718fb702c189df90 | [
"Unlicense"
] | null | null | null | pcc/websocket_writer.py | ACoolUsername9001/python-computercraft | 47c7a97dd013a75ee8e6f9e0718fb702c189df90 | [
"Unlicense"
] | null | null | null | pcc/websocket_writer.py | ACoolUsername9001/python-computercraft | 47c7a97dd013a75ee8e6f9e0718fb702c189df90 | [
"Unlicense"
] | null | null | null | from dataclasses import dataclass, field
from typing import TypeVar
import websocket
from io import IOBase
import json
WebSocket = TypeVar('WebSocket')
@dataclass
| 24.090909 | 84 | 0.630943 | from dataclasses import dataclass, field
from typing import TypeVar
import websocket
from io import IOBase
import json
WebSocket = TypeVar('WebSocket')
class ComputerCraftException(Exception):
pass
@dataclass
class CCWebsocketWriter(IOBase):
url: str
c_id: str
c_pass: str
websocket: WebSocket = field(default_factory=lambda: websocket.WebSocket())
def connect(self):
self.websocket.connect(self.url)
self.websocket.send(self.c_id)
self.websocket.send(self.c_pass)
def write(self, data, write=True):
if write:
self.websocket.send(data)
res = self.websocket.recv()
try:
res = ReturnValue(self, json.loads(res))
except json.decoder.JSONDecodeError:
raise ComputerCraftException(res)
return res
else:
return data
def disconnect(self):
self.websocket.close()
class ReturnValue(list):
def __init__(self, writer, *args, **kwargs):
super(ReturnValue, self).__init__(*args, **kwargs)
self.writer = writer
def __getattr__(self, item):
return self.writer.write(f'returned[1].{item}')
def __call__(self, *args):
return self.writer.write(f'returned[1]({",".join((str(x) for x in args))})')
| 758 | 246 | 152 |
555596172cf212e4f391806d87e9b2e6a7d8e3ee | 59 | py | Python | investments/__init__.py | neewy/TinkoffInvestmentsAnalyser | 049b419ea090d17c49016ebfa1ae0f8cde738b65 | [
"Apache-2.0"
] | 3 | 2020-06-13T16:29:23.000Z | 2022-02-11T19:40:07.000Z | investments/__init__.py | neewy/TinkoffInvestmentsAnalyser | 049b419ea090d17c49016ebfa1ae0f8cde738b65 | [
"Apache-2.0"
] | null | null | null | investments/__init__.py | neewy/TinkoffInvestmentsAnalyser | 049b419ea090d17c49016ebfa1ae0f8cde738b65 | [
"Apache-2.0"
] | 1 | 2020-06-22T18:42:04.000Z | 2020-06-22T18:42:04.000Z | from .investments import *
manager = InvestmentsManager()
| 14.75 | 30 | 0.779661 | from .investments import *
manager = InvestmentsManager()
| 0 | 0 | 0 |
22c66a9a28cd90e8c60bd459fb52e873cbed9ee3 | 14,978 | py | Python | hio-yocto-bsp/sources/poky/bitbake/lib/toaster/orm/models.py | qiangzai00001/hio-prj | 060ff97fe21093b1369db78109d5b730b2b181c8 | [
"MIT"
] | null | null | null | hio-yocto-bsp/sources/poky/bitbake/lib/toaster/orm/models.py | qiangzai00001/hio-prj | 060ff97fe21093b1369db78109d5b730b2b181c8 | [
"MIT"
] | null | null | null | hio-yocto-bsp/sources/poky/bitbake/lib/toaster/orm/models.py | qiangzai00001/hio-prj | 060ff97fe21093b1369db78109d5b730b2b181c8 | [
"MIT"
] | null | null | null | #
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Toaster Implementation
#
# Copyright (C) 2013 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from django.db import models
from django.db.models import F
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
| 39.209424 | 279 | 0.706837 | #
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Toaster Implementation
#
# Copyright (C) 2013 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from django.db import models
from django.db.models import F
from django.utils.encoding import python_2_unicode_compatible
class Build(models.Model):
    """A single bitbake build run and its summary statistics."""
    # Build outcome states (values stored in the ``outcome`` field).
    SUCCEEDED = 0
    FAILED = 1
    IN_PROGRESS = 2
    BUILD_OUTCOME = (
        (SUCCEEDED, 'Succeeded'),
        (FAILED, 'Failed'),
        (IN_PROGRESS, 'In Progress'),
    )
    # Fields the UI free-text search may query against.
    search_allowed_fields = ['machine', 'cooker_log_path', "target__target", "target__target_image_file__file_name"]
    machine = models.CharField(max_length=100)
    distro = models.CharField(max_length=100)
    distro_version = models.CharField(max_length=100)
    started_on = models.DateTimeField()
    completed_on = models.DateTimeField()
    timespent = models.IntegerField(default=0)
    outcome = models.IntegerField(choices=BUILD_OUTCOME, default=IN_PROGRESS)
    errors_no = models.IntegerField(default=0)
    warnings_no = models.IntegerField(default=0)
    cooker_log_path = models.CharField(max_length=500)
    build_name = models.CharField(max_length=100)
    bitbake_version = models.CharField(max_length=50)
    def get_sorted_target_list(self):
        """Return this build's Target rows ordered by target name."""
        tgts = Target.objects.filter(build_id = self.id).order_by( 'target' );
        return( tgts );
@python_2_unicode_compatible
class Target(models.Model):
    """A build target (recipe or image name) belonging to a Build."""
    search_allowed_fields = ['target', 'file_name']
    build = models.ForeignKey(Build)
    target = models.CharField(max_length=100)
    is_image = models.BooleanField(default = False)
    image_size = models.IntegerField(default=0)
    license_manifest_path = models.CharField(max_length=500, null=True)
    def package_count(self):
        """Number of packages installed into this target."""
        return Target_Installed_Package.objects.filter(target_id__exact=self.id).count()
    def __str__(self):
        return self.target
class Target_Image_File(models.Model):
    """An image file produced for a Target, with its size in bytes."""
    target = models.ForeignKey(Target)
    file_name = models.FilePathField(max_length=100)
    file_size = models.IntegerField()
class Target_File(models.Model):
    """A filesystem entry inside a target image (file, dir, symlink, ...)."""
    # Inode type codes, mirroring the classic Unix file types.
    ITYPE_REGULAR = 1
    ITYPE_DIRECTORY = 2
    ITYPE_SYMLINK = 3
    ITYPE_SOCKET = 4
    ITYPE_FIFO = 5
    ITYPE_CHARACTER = 6
    ITYPE_BLOCK = 7
    ITYPES = ( (ITYPE_REGULAR ,'regular'),
        ( ITYPE_DIRECTORY ,'directory'),
        ( ITYPE_SYMLINK ,'symlink'),
        ( ITYPE_SOCKET ,'socket'),
        ( ITYPE_FIFO ,'fifo'),
        ( ITYPE_CHARACTER ,'character'),
        ( ITYPE_BLOCK ,'block'),
    )
    target = models.ForeignKey(Target)
    path = models.FilePathField()
    size = models.IntegerField()
    inodetype = models.IntegerField(choices = ITYPES)
    permission = models.CharField(max_length=16)
    owner = models.CharField(max_length=128)
    group = models.CharField(max_length=128)
    # Parent directory entry; null for the filesystem root.
    directory = models.ForeignKey('Target_File', related_name="directory_set", null=True)
    # Symlink destination entry; null for non-symlinks.
    sym_target = models.ForeignKey('Target_File', related_name="symlink_set", null=True)
class TaskManager(models.Manager):
    """Custom manager adding lookups for setscene variants of a task."""
    def related_setscene(self, task_object):
        """Return the executed '<task>_setscene' twin(s) of *task_object*
        within the same build and recipe."""
        return Task.objects.filter(task_executed=True, build = task_object.build, recipe = task_object.recipe, task_name=task_object.task_name+"_setscene")
class Task(models.Model):
    """A single bitbake task executed (or covered/cached) during a Build."""
    # Shared-state (sstate) restore results.
    SSTATE_NA = 0
    SSTATE_MISS = 1
    SSTATE_FAILED = 2
    SSTATE_RESTORED = 3
    SSTATE_RESULT = (
        (SSTATE_NA, 'Not Applicable'), # For rest of tasks, but they still need checking.
        (SSTATE_MISS, 'File not in cache'), # the sstate object was not found
        (SSTATE_FAILED, 'Failed'), # there was a pkg, but the script failed
        (SSTATE_RESTORED, 'Succeeded'), # successfully restored
    )
    # Script language of the task body.
    CODING_NA = 0
    CODING_PYTHON = 2
    CODING_SHELL = 3
    TASK_CODING = (
        (CODING_NA, 'N/A'),
        (CODING_PYTHON, 'Python'),
        (CODING_SHELL, 'Shell'),
    )
    # Task execution outcomes; OUTCOME_NA is -1 on purpose (see the
    # index arithmetic in get_outcome_text/get_outcome_help below).
    OUTCOME_NA = -1
    OUTCOME_SUCCESS = 0
    OUTCOME_COVERED = 1
    OUTCOME_CACHED = 2
    OUTCOME_PREBUILT = 3
    OUTCOME_FAILED = 4
    OUTCOME_EMPTY = 5
    TASK_OUTCOME = (
        (OUTCOME_NA, 'Not Available'),
        (OUTCOME_SUCCESS, 'Succeeded'),
        (OUTCOME_COVERED, 'Covered'),
        (OUTCOME_CACHED, 'Cached'),
        (OUTCOME_PREBUILT, 'Prebuilt'),
        (OUTCOME_FAILED, 'Failed'),
        (OUTCOME_EMPTY, 'Empty'),
    )
    TASK_OUTCOME_HELP = (
        (OUTCOME_SUCCESS, 'This task successfully completed'),
        (OUTCOME_COVERED, 'This task did not run because its output is provided by another task'),
        (OUTCOME_CACHED, 'This task restored output from the sstate-cache directory or mirrors'),
        (OUTCOME_PREBUILT, 'This task did not run because its outcome was reused from a previous build'),
        (OUTCOME_FAILED, 'This task did not complete'),
        (OUTCOME_EMPTY, 'This task has no executable content'),
        (OUTCOME_NA, ''),
    )
    search_allowed_fields = [ "recipe__name", "recipe__version", "task_name", "logfile" ]
    objects = TaskManager()
    def get_related_setscene(self):
        """Return the executed '_setscene' variant(s) of this task."""
        return Task.objects.related_setscene(self)
    def get_outcome_text(self):
        """Human-readable outcome label."""
        # TASK_OUTCOME is ordered by outcome value starting at -1,
        # hence the +1 positional offset.
        return Task.TASK_OUTCOME[self.outcome + 1][1]
    def get_outcome_help(self):
        """Longer outcome description for tooltips."""
        # OUTCOME_NA == -1 indexes the last tuple entry, which is the
        # empty help string — relies on TASK_OUTCOME_HELP's ordering.
        return Task.TASK_OUTCOME_HELP[self.outcome][1]
    def get_sstate_text(self):
        """Human-readable sstate result, or '' when not applicable."""
        if self.sstate_result==Task.SSTATE_NA:
            return ''
        else:
            return Task.SSTATE_RESULT[self.sstate_result][1]
    def get_executed_display(self):
        """'Executed' / 'Not Executed' label for the UI."""
        if self.task_executed:
            return "Executed"
        return "Not Executed"
    def get_description(self):
        """Help text for this task from the HelpText table, or '' if none."""
        helptext = HelpText.objects.filter(key=self.task_name, area=HelpText.VARIABLE, build=self.build)
        try:
            return helptext[0].text
        except IndexError:
            return ''
    build = models.ForeignKey(Build, related_name='task_build')
    order = models.IntegerField(null=True)
    task_executed = models.BooleanField(default=False) # True means Executed, False means Not/Executed
    outcome = models.IntegerField(choices=TASK_OUTCOME, default=OUTCOME_NA)
    sstate_checksum = models.CharField(max_length=100, blank=True)
    path_to_sstate_obj = models.FilePathField(max_length=500, blank=True)
    recipe = models.ForeignKey('Recipe', related_name='build_recipe')
    task_name = models.CharField(max_length=100)
    source_url = models.FilePathField(max_length=255, blank=True)
    work_directory = models.FilePathField(max_length=255, blank=True)
    script_type = models.IntegerField(choices=TASK_CODING, default=CODING_NA)
    line_number = models.IntegerField(default=0)
    disk_io = models.IntegerField(null=True)
    cpu_usage = models.DecimalField(max_digits=6, decimal_places=2, null=True)
    elapsed_time = models.DecimalField(max_digits=6, decimal_places=2, null=True)
    sstate_result = models.IntegerField(choices=SSTATE_RESULT, default=SSTATE_NA)
    message = models.CharField(max_length=240)
    logfile = models.FilePathField(max_length=255, blank=True)
    outcome_text = property(get_outcome_text)
    sstate_text = property(get_sstate_text)
    class Meta:
        ordering = ('order', 'recipe' ,)
        unique_together = ('build', 'recipe', 'task_name', )
class Task_Dependency(models.Model):
    """Edge in the task dependency graph: ``task`` depends on ``depends_on``."""
    task = models.ForeignKey(Task, related_name='task_dependencies_task')
    depends_on = models.ForeignKey(Task, related_name='task_dependencies_depends')
class Package(models.Model):
    """A binary package produced by a recipe during a Build."""
    search_allowed_fields = ['name', 'version', 'revision', 'recipe__name', 'recipe__version', 'recipe__license', 'recipe__layer_version__layer__name', 'recipe__layer_version__branch', 'recipe__layer_version__commit', 'recipe__layer_version__layer__local_path', 'installed_name']
    build = models.ForeignKey('Build')
    recipe = models.ForeignKey('Recipe', null=True)
    name = models.CharField(max_length=100)
    installed_name = models.CharField(max_length=100, default='')
    version = models.CharField(max_length=100, blank=True)
    revision = models.CharField(max_length=32, blank=True)
    summary = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=200, blank=True)
    size = models.IntegerField(default=0)
    installed_size = models.IntegerField(default=0)
    section = models.CharField(max_length=80, blank=True)
    license = models.CharField(max_length=80, blank=True)
class Package_DependencyManager(models.Manager):
    """Default manager that hides self-dependencies (a package that
    depends on itself)."""
    use_for_related_fields = True
    def get_query_set(self):
        # Exclude rows where the package depends on itself.
        return super(Package_DependencyManager, self).get_query_set().exclude(package_id = F('depends_on__id'))
class Package_Dependency(models.Model):
    """A runtime relationship (depends/recommends/suggests/...) between
    two packages, optionally scoped to an install Target."""
    TYPE_RDEPENDS = 0
    TYPE_TRDEPENDS = 1
    TYPE_RRECOMMENDS = 2
    TYPE_TRECOMMENDS = 3
    TYPE_RSUGGESTS = 4
    TYPE_RPROVIDES = 5
    TYPE_RREPLACES = 6
    TYPE_RCONFLICTS = 7
    ' TODO: bpackage should be changed to remove the DEPENDS_TYPE access '
    DEPENDS_TYPE = (
        (TYPE_RDEPENDS, "depends"),
        (TYPE_TRDEPENDS, "depends"),
        (TYPE_TRECOMMENDS, "recommends"),
        (TYPE_RRECOMMENDS, "recommends"),
        (TYPE_RSUGGESTS, "suggests"),
        (TYPE_RPROVIDES, "provides"),
        (TYPE_RREPLACES, "replaces"),
        (TYPE_RCONFLICTS, "conflicts"),
    )
    ''' Indexed by dep_type, in view order, key for short name and help
        description which when viewed will be printf'd with the
        package name.
    '''
    DEPENDS_DICT = {
        TYPE_RDEPENDS : ("depends", "%s is required to run %s"),
        TYPE_TRDEPENDS : ("depends", "%s is required to run %s"),
        TYPE_TRECOMMENDS : ("recommends", "%s extends the usability of %s"),
        TYPE_RRECOMMENDS : ("recommends", "%s extends the usability of %s"),
        TYPE_RSUGGESTS : ("suggests", "%s is suggested for installation with %s"),
        TYPE_RPROVIDES : ("provides", "%s is provided by %s"),
        TYPE_RREPLACES : ("replaces", "%s is replaced by %s"),
        TYPE_RCONFLICTS : ("conflicts", "%s conflicts with %s, which will not be installed if this package is not first removed"),
    }
    package = models.ForeignKey(Package, related_name='package_dependencies_source')
    depends_on = models.ForeignKey(Package, related_name='package_dependencies_target') # soft dependency
    dep_type = models.IntegerField(choices=DEPENDS_TYPE)
    target = models.ForeignKey(Target, null=True)
    objects = Package_DependencyManager()
class Target_Installed_Package(models.Model):
    """Join table: a Package that ended up installed in a Target image."""
    target = models.ForeignKey(Target)
    package = models.ForeignKey(Package, related_name='buildtargetlist_package')
class Package_File(models.Model):
    """A single file shipped by a Package, with its path and size."""
    package = models.ForeignKey(Package, related_name='buildfilelist_package')
    path = models.FilePathField(max_length=255, blank=True)
    size = models.IntegerField()
class Recipe(models.Model):
    """A recipe as parsed from a specific layer version."""
    # Field lookups that search queries may target (presumably consumed by
    # search views elsewhere in the application -- TODO confirm).
    search_allowed_fields = ['name', 'version', 'file_path', 'section', 'license', 'layer_version__layer__name', 'layer_version__branch', 'layer_version__commit', 'layer_version__layer__local_path']
    name = models.CharField(max_length=100, blank=True)
    version = models.CharField(max_length=100, blank=True)
    # The exact layer snapshot (branch/commit) this recipe came from.
    layer_version = models.ForeignKey('Layer_Version', related_name='recipe_layer_version')
    summary = models.CharField(max_length=100, blank=True)
    description = models.CharField(max_length=100, blank=True)
    section = models.CharField(max_length=100, blank=True)
    license = models.CharField(max_length=200, blank=True)
    homepage = models.URLField(blank=True)
    bugtracker = models.URLField(blank=True)
    # Path of the recipe file itself.
    file_path = models.FilePathField(max_length=255)
class Recipe_DependencyManager(models.Manager):
    """Manager for Recipe_Dependency that hides self-dependencies."""
    use_for_related_fields = True
    def get_query_set(self):
        # Exclude rows where a recipe depends on itself.
        return super(Recipe_DependencyManager, self).get_query_set().exclude(recipe_id = F('depends_on__id'))
class Recipe_Dependency(models.Model):
    """Build-time (DEPENDS) or runtime (RDEPENDS) edge between two Recipes."""
    TYPE_DEPENDS = 0
    TYPE_RDEPENDS = 1
    DEPENDS_TYPE = (
        (TYPE_DEPENDS, "depends"),
        (TYPE_RDEPENDS, "rdepends"),
    )
    recipe = models.ForeignKey(Recipe, related_name='r_dependencies_recipe')
    depends_on = models.ForeignKey(Recipe, related_name='r_dependencies_depends')
    dep_type = models.IntegerField(choices=DEPENDS_TYPE)
    # Default manager filters out self-dependencies (see manager above).
    objects = Recipe_DependencyManager()
class Layer(models.Model):
    """A metadata layer: its name, local checkout path, and layer-index URL."""
    name = models.CharField(max_length=100)
    local_path = models.FilePathField(max_length=255)
    layer_index_url = models.URLField()
class Layer_Version(models.Model):
    """Snapshot of a Layer (branch + commit) as used by a particular Build."""
    build = models.ForeignKey(Build, related_name='layer_version_build')
    layer = models.ForeignKey(Layer, related_name='layer_version_layer')
    branch = models.CharField(max_length=50)
    commit = models.CharField(max_length=100)
    # Layer ordering priority -- presumably bitbake's BBFILE_PRIORITY; confirm.
    priority = models.IntegerField()
class Variable(models.Model):
    """A configuration variable captured for a build."""
    # Field lookups that search queries may target, including the related
    # history records' file names.
    search_allowed_fields = ['variable_name', 'variable_value',
                             'vhistory__file_name', "description"]
    build = models.ForeignKey(Build, related_name='variable_build')
    variable_name = models.CharField(max_length=100)
    variable_value = models.TextField(blank=True)
    # Whether the value was changed -- from its default, presumably; confirm.
    changed = models.BooleanField(default=False)
    human_readable_name = models.CharField(max_length=200)
    description = models.TextField(blank=True)
class VariableHistory(models.Model):
    """One assignment to a Variable: value, source location, and operation."""
    variable = models.ForeignKey(Variable, related_name='vhistory')
    value = models.TextField(blank=True)
    file_name = models.FilePathField(max_length=255)
    line_number = models.IntegerField(null=True)
    # Assignment operator used (e.g. set/append -- exact vocabulary defined by
    # whatever writes these rows; not shown here).
    operation = models.CharField(max_length=16)
class HelpText(models.Model):
    """Keyed help text attached to a build; only the 'variable' area exists so far."""
    VARIABLE = 0
    HELPTEXT_AREA = ((VARIABLE, 'variable'), )
    build = models.ForeignKey(Build, related_name='helptext_build')
    area = models.IntegerField(choices=HELPTEXT_AREA)
    key = models.CharField(max_length=100)
    text = models.TextField()
class LogMessage(models.Model):
    """A build log entry with severity, optional task, and source location."""
    INFO = 0
    WARNING = 1
    ERROR = 2
    LOG_LEVEL = ( (INFO, "info"),
            (WARNING, "warn"),
            (ERROR, "error") )
    build = models.ForeignKey(Build)
    # Task that emitted the message, when known.
    task = models.ForeignKey(Task, blank = True, null=True)
    level = models.IntegerField(choices=LOG_LEVEL, default=INFO)
    message=models.CharField(max_length=240)
    # Source file/line the message originated from, when available.
    pathname = models.FilePathField(max_length=255, blank=True)
    lineno = models.IntegerField(null=True)
| 1,274 | 12,221 | 508 |
180699e09004bdeab130800bdcc53eee329669db | 737 | py | Python | setup.py | jorgeviz/robosuite | 7fca50214dfa4978c9f3b5db0016b35a2920b0c9 | [
"MIT"
] | null | null | null | setup.py | jorgeviz/robosuite | 7fca50214dfa4978c9f3b5db0016b35a2920b0c9 | [
"MIT"
] | null | null | null | setup.py | jorgeviz/robosuite | 7fca50214dfa4978c9f3b5db0016b35a2920b0c9 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Packaging metadata for the robosuite distribution.
setup(
    name="robosuite",
    # Ship only the packages under the top-level "robosuite" namespace.
    packages=[
        package for package in find_packages() if package.startswith("robosuite")
    ],
    install_requires=[
        "numpy>=1.13.3",
        "scipy>=1.3.1",
        "opencv-python>=3.4.3.18",
        # "mujoco-py<1.50.2,>=1.50.1",
    ],
    # Extract all resources out of the zip/egg so data files exist on disk.
    eager_resources=['*'],
    include_package_data=True,
    python_requires='>=3',
    description="Surreal Robotics Suite: Standardized and Accessible Robot Manipulation Benchmark in Physics Simulation",
    author="Yuke Zhu, Jiren Zhu, Ajay Mandlekar, Joan Creus-Costa, Anchit Gupta",
    url="https://github.com/StanfordVL/robosuite",
    author_email="yukez@cs.stanford.edu",
    version="0.1.0",
)
| 30.708333 | 121 | 0.652646 | from setuptools import setup, find_packages
setup(
name="robosuite",
packages=[
package for package in find_packages() if package.startswith("robosuite")
],
install_requires=[
"numpy>=1.13.3",
"scipy>=1.3.1",
"opencv-python>=3.4.3.18",
# "mujoco-py<1.50.2,>=1.50.1",
],
eager_resources=['*'],
include_package_data=True,
python_requires='>=3',
description="Surreal Robotics Suite: Standardized and Accessible Robot Manipulation Benchmark in Physics Simulation",
author="Yuke Zhu, Jiren Zhu, Ajay Mandlekar, Joan Creus-Costa, Anchit Gupta",
url="https://github.com/StanfordVL/robosuite",
author_email="yukez@cs.stanford.edu",
version="0.1.0",
)
| 0 | 0 | 0 |
d17f15fc3600e6e17ac9cae2e6c9b5d0dcf93b3f | 58 | py | Python | pyRoute13/api/generators/__init__.py | ianphil/pyRoute13 | 6aa830ebb0adb563fc3023527140664c4ac9e1b1 | [
"MIT"
] | null | null | null | pyRoute13/api/generators/__init__.py | ianphil/pyRoute13 | 6aa830ebb0adb563fc3023527140664c4ac9e1b1 | [
"MIT"
] | null | null | null | pyRoute13/api/generators/__init__.py | ianphil/pyRoute13 | 6aa830ebb0adb563fc3023527140664c4ac9e1b1 | [
"MIT"
] | null | null | null | __all__ = ["jobs", "staffing_plan", "transfer_generator"]
| 29 | 57 | 0.724138 | __all__ = ["jobs", "staffing_plan", "transfer_generator"]
| 0 | 0 | 0 |
bfbd89c1b2f813171069a9700420ea26b48c6585 | 4,625 | py | Python | ssc.py | zmcneilly/animated-journey | 74baaee3d03dbbb45451d9ab5592023f92927ded | [
"MIT"
] | null | null | null | ssc.py | zmcneilly/animated-journey | 74baaee3d03dbbb45451d9ab5592023f92927ded | [
"MIT"
] | null | null | null | ssc.py | zmcneilly/animated-journey | 74baaee3d03dbbb45451d9ab5592023f92927ded | [
"MIT"
] | null | null | null | import argparse
import re
import os
import ssh_config
import paramiko
import getpass
from pathlib import Path
from ssh_config.hosts import ping
def open_ssh_key(path: str, password: str=None) -> (paramiko.pkey.PKey, str):
    """
    Function will return the Pkey object for the path specified
    :param path: The path to the Pkey object
    :param password: (Optional) The password for the private key
    :return: Tuple of (loaded Pkey, password used or None), or None if no
             supported key type could parse the file
    """
    with open(path, "r") as key_file:
        for key_type in (paramiko.DSSKey, paramiko.RSAKey, paramiko.ECDSAKey):
            # Each parse attempt consumes the stream, so rewind first.
            key_file.seek(0)
            try:
                # Bug fix: the original discarded the key parsed here and fell
                # through the loop, so unencrypted keys were never returned.
                return key_type.from_private_key(key_file), password
            except paramiko.PasswordRequiredException:
                # Key is encrypted; prompt once and retry with the password.
                if password is None:
                    password = getpass.getpass("SSH Key password: ")
                key_file.seek(0)
                try:
                    return key_type.from_private_key(key_file, password), password
                except paramiko.SSHException:
                    # Wrong password or wrong key type; try the next type.
                    pass
            except paramiko.SSHException:
                # Not this key type (the original let this propagate and
                # aborted the whole probe loop).
                pass
    return None
if __name__ == "__main__":
main() | 39.87069 | 129 | 0.643243 | import argparse
import re
import os
import ssh_config
import paramiko
import getpass
from pathlib import Path
from ssh_config.hosts import ping
def prompt_for_input(prompt: str="Continue? [y/n]"):
    """Ask a yes/no question on stdin until an answer starting with y/n is given.

    Returns True for "y...", False for "n...". Re-prompts on anything else,
    including empty input (the original indexed resp[0], which raised
    IndexError on a bare Enter, and recursed instead of looping).
    """
    while True:
        resp = input(prompt).lower().strip()
        if resp.startswith("y"):
            return True
        if resp.startswith("n"):
            return False
        print("Invalid input")
def parse_args():
    """Define and parse the command-line interface.

    Positional: connection_string ([user@]host). Options: --port, --binary
    (ssh executable to invoke), --key (private key to install on the remote
    host), --output (path of the bash wrapper script to generate).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, help="Port to connect to", default=22)
    parser.add_argument("--binary", type=str, default="/usr/bin/ssh", help="SSH binary to use")
    parser.add_argument("--key", type=str, default="", help="SSH Key to install on remote host")
    parser.add_argument("--output", help="Bash script to write to")
    parser.add_argument('connection_string', type=str, help="SSH Connection string")
    return parser.parse_args()
def open_ssh_key(path: str, password: str=None) -> (paramiko.pkey.PKey, str):
    """
    Function will return the Pkey object for the path specified
    :param path: The path to the Pkey object
    :param password: (Optional) The password for the private key
    :return: Tuple of (loaded Pkey, password used or None), or None if no
             supported key type could parse the file
    """
    with open(path, "r") as key_file:
        for key_type in (paramiko.DSSKey, paramiko.RSAKey, paramiko.ECDSAKey):
            # Each parse attempt consumes the stream, so rewind first.
            key_file.seek(0)
            try:
                # Bug fix: the original discarded the key parsed here and fell
                # through the loop, so unencrypted keys were never returned.
                return key_type.from_private_key(key_file), password
            except paramiko.PasswordRequiredException:
                # Key is encrypted; prompt once and retry with the password.
                if password is None:
                    password = getpass.getpass("SSH Key password: ")
                key_file.seek(0)
                try:
                    return key_type.from_private_key(key_file, password), password
                except paramiko.SSHException:
                    # Wrong password or wrong key type; try the next type.
                    pass
            except paramiko.SSHException:
                # Not this key type (the original let this propagate and
                # aborted the whole probe loop).
                pass
    return None
def main():
    """Interactively set up SSH access to a host and emit a wrapper script.

    Flow: parse [user@]host, check reachability and the stored host key,
    prompt for credentials until a login works, optionally stage a private
    key, write a bash script (args.output) that opens the connection, and
    persist the updated host configuration.
    """
    # Handle arguments
    args = parse_args()
    # Split an optional "user@" prefix off the connection string.
    match = re.search(r'^(?:([^@]+)@)?(.*)$', args.connection_string)
    hostname = match.group(2)
    username = match.group(1)
    ssh_args = "-o GlobalKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
    # Do inital setup of host and user
    if username is None or username == "":
        username = os.environ["USER"]
    if not ping(hostname):
        if not prompt_for_input("{} not responding; continue? [y/n]".format(hostname)):
            raise ConnectionError("Cannot reach {}".format(hostname))
    host_config = ssh_config.load_host_configuration_file(hostname)
    user_config = ssh_config.SshUser(username)
    # No saved configuration for this host yet: build one from a fresh host key.
    if host_config is None:
        pub_key = ssh_config.get_host_key(hostname, args.port)
        host_config = ssh_config.SshConfig(hostname, args.port, public_host_key=pub_key, users={user_config})
    # Reuse the stored user entry if this user is already known for the host.
    for user in host_config.users:
        if user.name == username:
            user_config = user
    # Validate the SSH host key
    if not host_config.validate_public_host_key():
        if prompt_for_input("Public key doesn't match records; continue? [y/n]"):
            host_config.public_host_key = ssh_config.get_host_key(host_config.working_alias, host_config.port)
        else:
            raise ConnectionError("Public host key is wrong! Aborting")
    # Check if key was specified at the command line
    if args.key != "":
        user_config.key_location = str(Path(args.key).resolve())
        user_config.desire_key = True
    # Keep prompting for a password until a login attempt succeeds.
    while not host_config.validate_login(user_config):
        user_config.passwd = getpass.getpass("Can't authenticate. {con_str}'s password: ".format(con_str=args.connection_string))
    host_config.users.update({user_config})
    # Paramiko, at least, has access.
    # Handle key installation
    if user_config.desire_key and Path(user_config.key_location).exists():
        priv_key, priv_key_password = open_ssh_key(user_config.key_location, user_config.key_password)
        pub_key = priv_key.get_base64()
        if priv_key_password is not None:
            user_config.key_password = priv_key_password
        ssh = host_config.get_ssh_connection(user_config)
        sftp = ssh.open_sftp()
    # Create bash script
    with open(str(Path(args.output).resolve()), "w") as __f:
        connection = "{ssh} {arguments} {user}@{host}\n\n".format(ssh=args.binary,
                                                                  arguments=ssh_args,
                                                                  user=user_config.name,
                                                                  host=host_config.working_alias)
        __f.write("#!/bin/bash\n\n")
        __f.write(connection)
    # Save any changes
    host_config.users.update({user_config})
    ssh_config.add_to_configuration(host_config)
if __name__ == "__main__":
main() | 3,583 | 0 | 69 |
90db707433251f5390a99d5b753741016af32f77 | 298 | py | Python | backend/treeckle/organizations/models.py | CAPTxTreeckle/Treeckle-3.0 | 0c8a1a1db5685a22968644deabdd79b525ff0140 | [
"MIT"
] | null | null | null | backend/treeckle/organizations/models.py | CAPTxTreeckle/Treeckle-3.0 | 0c8a1a1db5685a22968644deabdd79b525ff0140 | [
"MIT"
] | 1 | 2021-11-28T06:18:51.000Z | 2021-11-28T06:18:51.000Z | backend/treeckle/organizations/models.py | CAPTxTreeckle/Treeckle-3.0 | 0c8a1a1db5685a22968644deabdd79b525ff0140 | [
"MIT"
] | 1 | 2021-08-02T09:20:10.000Z | 2021-08-02T09:20:10.000Z | from django.db import models
from treeckle.common.models import TimestampedModel
# Create your models here.
| 22.923077 | 56 | 0.718121 | from django.db import models
from treeckle.common.models import TimestampedModel
# Create your models here.
class Organization(TimestampedModel):
    """An organization with a unique name; default ordering is alphabetical."""
    name = models.CharField(max_length=255, unique=True)
    class Meta:
        ordering = ["name"]
    def __str__(self):
        return self.name
| 22 | 145 | 22 |
757845efd7aed6aba41a24fa9b9c62d23336816e | 1,471 | py | Python | classes/tdevent.py | tshrove/technicaldebtprediction | 11c5d84b4976cc7e93cf79294403c7615cce81bd | [
"MIT"
] | null | null | null | classes/tdevent.py | tshrove/technicaldebtprediction | 11c5d84b4976cc7e93cf79294403c7615cce81bd | [
"MIT"
] | null | null | null | classes/tdevent.py | tshrove/technicaldebtprediction | 11c5d84b4976cc7e93cf79294403c7615cce81bd | [
"MIT"
] | null | null | null | from dateutil.parser import parse
from configs import ConfigFactory
config = ConfigFactory.factory()
| 23.725806 | 86 | 0.624745 | from dateutil.parser import parse
from configs import ConfigFactory
config = ConfigFactory.factory()
class TDEvent:
    """Wraps a JIRA issue as a single technical-debt event.

    An event contributes a signed debt value (negative once the issue is
    resolved) and takes effect on a date: the resolution date for resolved
    issues, the creation date otherwise.
    """

    # Internal state, always overwritten in __init__.
    __raw_issue = None
    __is_resolved = None

    def __init__(self, jira_issue, is_resolved):
        self.__raw_issue = jira_issue
        self.__is_resolved = is_resolved

    @property
    def event_value(self):
        """Signed debt amount: priority-based weight, negated when resolved."""
        priority = self.__raw_issue.fields.priority
        # Issues without a priority fall back to a weight of 3.
        amount = 3 if priority is None else TDEvent.get_debt_value(priority.name)
        return -amount if self.__is_resolved else amount

    @property
    def event_date(self):
        """Date the event applies: resolution date if resolved, else creation."""
        if self.__is_resolved is True:
            return self.__resolved_date()
        return self.__created_date()

    def __resolved_date(self):
        # Resolution date may be absent on the issue; propagate None then.
        raw = self.__raw_issue.fields.resolutiondate
        return parse(raw) if raw is not None else None

    def __created_date(self):
        return parse(self.__raw_issue.fields.created)

    @staticmethod
    def get_debt_value(priority_val_str):
        """ Gets the amount of technical debt related to the issue. """
        return config.get_priority_values()[priority_val_str]
| 772 | 573 | 23 |
c042d99239011a58990a2b2c696a0be2d222ca09 | 484 | py | Python | ssc/python/f/fdi.py | xonoer/tssc | c53e76f22e3c496bcd2123fd2765bb7e461dd8ef | [
"MIT"
] | 13 | 2015-04-24T18:42:39.000Z | 2021-01-08T04:09:33.000Z | examples/fdi.py | arlionn/python-in-stata | 49046a270a5cc3ffdfdd634043277584dad13638 | [
"MIT"
] | 1 | 2018-05-22T12:28:48.000Z | 2018-05-22T12:28:48.000Z | examples/fdi.py | arlionn/python-in-stata | 49046a270a5cc3ffdfdd634043277584dad13638 | [
"MIT"
] | 8 | 2015-08-28T20:20:14.000Z | 2021-01-08T04:09:38.000Z | try:
from sympy import *
# this kind of import puts a lot of stuff in global namespace;
# usually better to import what you need or import sympy
except ImportError:
st_local("importerror", "True")
raise
x = Symbol('x')
f = Function('f')
f = eval(st_local("anything").replace('^','**'))
st_local("anything", st_local("anything").replace("**", "^"))
st_local("fprime", repr(f.diff(x)).replace("**", "^"))
st_local("fint", repr(f.integrate(x)).replace("**", "^"))
# Symbolic differentiation/integration helper meant to run embedded in a host
# that provides st_local -- presumably Stata's Python integration, where
# st_local reads/writes Stata local macros; confirm against the caller.
try:
    from sympy import *
    # this kind of import puts a lot of stuff in global namespace;
    # usually better to import what you need or import sympy
except ImportError:
    # Report the failure back to the host before re-raising.
    st_local("importerror", "True")
    raise
x = Symbol('x')
f = Function('f')
# Translate caret exponents to Python syntax and evaluate the expression.
# NOTE(review): eval on externally supplied text executes arbitrary code --
# only acceptable because the input comes from the local host session.
f = eval(st_local("anything").replace('^','**'))
st_local("anything", st_local("anything").replace("**", "^"))
# Store the derivative and antiderivative with caret exponents restored.
st_local("fprime", repr(f.diff(x)).replace("**", "^"))
st_local("fint", repr(f.integrate(x)).replace("**", "^"))
| 0 | 0 | 0 |
fea552e114eb820acec0c82c3f08e997cca0a6a5 | 287 | py | Python | python-sense-hat-temp-hum.py | leon-anavi/python-sense-hat-scripts | 63bb966b7b2db44c8cef9f4434a2bba067a473a0 | [
"MIT"
] | null | null | null | python-sense-hat-temp-hum.py | leon-anavi/python-sense-hat-scripts | 63bb966b7b2db44c8cef9f4434a2bba067a473a0 | [
"MIT"
] | null | null | null | python-sense-hat-temp-hum.py | leon-anavi/python-sense-hat-scripts | 63bb966b7b2db44c8cef9f4434a2bba067a473a0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from sense_hat import SenseHat
sense = SenseHat()
sense.clear()
temp = sense.get_temperature_from_pressure()
temp = round(temp, 1)
print("Temperature: %s C" % temp)
humidity = sense.get_humidity()
humidity = round(humidity, 2)
print("Humidity: %s %%rH" % humidity)
| 19.133333 | 44 | 0.721254 | #!/usr/bin/python
from sense_hat import SenseHat
sense = SenseHat()
sense.clear()
temp = sense.get_temperature_from_pressure()
temp = round(temp, 1)
print("Temperature: %s C" % temp)
humidity = sense.get_humidity()
humidity = round(humidity, 2)
print("Humidity: %s %%rH" % humidity)
| 0 | 0 | 0 |
496455074737adf0cd223268168d746bc2b16dbd | 1,662 | py | Python | Day 38/NonDivisibleSubset.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | Day 38/NonDivisibleSubset.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | Day 38/NonDivisibleSubset.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | '''
Given a set of distinct integers, print the size of a maximal subset of where the sum of any numbers in is not evenly divisible by .
For example, the array and . One of the arrays that can be created is . Another is . After testing all permutations, the maximum length solution array has elements.
Function Description
Complete the nonDivisibleSubset function in the editor below. It should return an integer representing the length of the longest subset of meeting the criteria.
nonDivisibleSubset has the following parameter(s):
S: an array of integers
k: an integer
Input Format
The first line contains space-separated integers, and , the number of values in and the non factor.
The second line contains space-separated integers describing , the unique values of the set.
Constraints
All of the given numbers are distinct.
Output Format
Print the size of the largest possible subset ().
Sample Input
4 3
1 7 2 4
Sample Output
3
'''
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # HackerRank harness: first stdin line is "n k", second line the n values;
    # the answer is written to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    first_multiple_input = input().rstrip().split()
    n = int(first_multiple_input[0])  # implied by len(s); kept for the input format
    k = int(first_multiple_input[1])
    s = list(map(int, input().rstrip().split()))
    result = nonDivisibleSubset(k, s)
    fptr.write(str(result) + '\n')
    fptr.close()
| 24.80597 | 166 | 0.687124 | '''
Given a set of distinct integers, print the size of a maximal subset of where the sum of any numbers in is not evenly divisible by .
For example, the array and . One of the arrays that can be created is . Another is . After testing all permutations, the maximum length solution array has elements.
Function Description
Complete the nonDivisibleSubset function in the editor below. It should return an integer representing the length of the longest subset of meeting the criteria.
nonDivisibleSubset has the following parameter(s):
S: an array of integers
k: an integer
Input Format
The first line contains space-separated integers, and , the number of values in and the non factor.
The second line contains space-separated integers describing , the unique values of the set.
Constraints
All of the given numbers are distinct.
Output Format
Print the size of the largest possible subset ().
Sample Input
4 3
1 7 2 4
Sample Output
3
'''
#!/bin/python3
import math
import os
import random
import re
import sys
def nonDivisibleSubset(k, S):
    """Return the size of the largest subset of S in which no pair of
    elements sums to a multiple of k.

    Residue-counting solution: elements with residues r and k-r conflict,
    so keep the larger of each complementary pair of buckets; at most one
    element with residue 0 (or residue k/2 when k is even) may be kept,
    and only if such an element actually exists.
    """
    counts = [0] * k
    for value in S:
        counts[value % k] += 1
    # At most one element divisible by k can be included.
    max_size = 1 if counts[0] > 0 else 0
    for r in range(1, k // 2 + 1):
        if r != k - r:
            # Residues r and k-r conflict; keep whichever bucket is larger.
            max_size += max(counts[r], counts[k - r])
        else:
            # k even, r == k/2: these elements conflict with each other, so
            # keep at most one -- and only if one exists. (Bug fix: the
            # original added 1 unconditionally, overcounting when this bucket
            # was empty, e.g. nonDivisibleSubset(4, [1]) returned 2.)
            max_size += 1 if counts[r] > 0 else 0
    return max_size
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
s = list(map(int, input().rstrip().split()))
result = nonDivisibleSubset(k, s)
fptr.write(str(result) + '\n')
fptr.close()
| 260 | 0 | 23 |
91b263ab9961d18bc25a570f6bc10f7bfdbc22e2 | 566 | py | Python | libs/yowsup/yowsup/yowsup/layers/protocol_media/protocolentities/test_message_media.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 22 | 2017-07-14T20:01:17.000Z | 2022-03-08T14:22:39.000Z | libs/yowsup/yowsup/yowsup/layers/protocol_media/protocolentities/test_message_media.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 6 | 2017-07-14T21:03:50.000Z | 2021-06-10T19:08:32.000Z | libs/yowsup/yowsup/yowsup/layers/protocol_media/protocolentities/test_message_media.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 13 | 2017-07-14T20:13:14.000Z | 2020-11-12T08:06:05.000Z | from yowsup.layers.protocol_media.protocolentities.message_media import MediaMessageProtocolEntity
from yowsup.layers.protocol_messages.protocolentities.test_message import MessageProtocolEntityTest
from yowsup.structs import ProtocolTreeNode
| 51.454545 | 99 | 0.810954 | from yowsup.layers.protocol_media.protocolentities.message_media import MediaMessageProtocolEntity
from yowsup.layers.protocol_messages.protocolentities.test_message import MessageProtocolEntityTest
from yowsup.structs import ProtocolTreeNode
class MediaMessageProtocolEntityTest(MessageProtocolEntityTest):
    """Extends the base message-entity test fixture with a <media> child node."""
    def setUp(self):
        super(MediaMessageProtocolEntityTest, self).setUp()
        self.ProtocolEntity = MediaMessageProtocolEntity
        # The base class builds self.node; attach a media child so the node
        # parses as a media message for the entity under test.
        mediaNode = ProtocolTreeNode("media", {"type":"MEDIA_TYPE"}, None, None)
        self.node.addChild(mediaNode)
| 231 | 43 | 49 |
02546173bb38fe0ed46188dd29c53ee414c43b00 | 1,070 | py | Python | Tareas/PrimerBimestre/TareaNro1Ficheros/ReadFiles/main.py | renatojobal/python_sol | d8d0434755679d62caa34d0ea227aacc2aa1ad6d | [
"MIT"
] | null | null | null | Tareas/PrimerBimestre/TareaNro1Ficheros/ReadFiles/main.py | renatojobal/python_sol | d8d0434755679d62caa34d0ea227aacc2aa1ad6d | [
"MIT"
] | null | null | null | Tareas/PrimerBimestre/TareaNro1Ficheros/ReadFiles/main.py | renatojobal/python_sol | d8d0434755679d62caa34d0ea227aacc2aa1ad6d | [
"MIT"
] | 2 | 2021-02-02T03:33:44.000Z | 2021-02-02T03:34:20.000Z | # Parte 1
# with open('message.txt','r') as file:
# print(file.read())
#
# Parte 2
# with open('alice.txt','r') as file:
# (file.read())
# Parte 2
#with open('alice.txt','r') as file:
# line_counter = 0
# for line in file:
# print(line, end="")
#
# if (line_counter != 0) and (line_counter % 20 == 0) :
# continuar = input("\n------\n\tPresione 'Enter' para continuar ->")
#
# line_counter += 1
#
# Parte 3
import re

# Mapping of words to replacements: gender-swap the pronouns/names and
# replace the author's name.
words_to_change = {
    "Alice":"Bob",
    "She":"He",
    "Her":"His",
    "Herself":"Himself",
    "she":"he",
    "her":"his",
    "herself":"himself",
    "Lewis":"Renato",
    "Carroll":"Balcazar"
}

with open('alice.txt','r') as file:
    line_counter = 0
    for line in file:
        for word, replacement in words_to_change.items():
            # Replace whole words only. Bug fix: the original used str.replace,
            # which corrupted substrings ("finished" -> "finihed" via "she")
            # and cascaded ("Herself" -> "Hisself" because "Her" ran first).
            line = re.sub(r'\b{}\b'.format(re.escape(word)), replacement, line)
        print(line, end="")
        # Pause every 20 lines so the text can be read page by page.
        if (line_counter != 0) and (line_counter % 20 == 0) :
            continuar = input("\n------\n\tPresione 'Enter' para continuar ->")
        line_counter += 1
| 22.291667 | 136 | 0.580374 | # Parte 1
# with open('message.txt','r') as file:
# print(file.read())
#
# Parte 2
# with open('alice.txt','r') as file:
# (file.read())
# Parte 2
#with open('alice.txt','r') as file:
# line_counter = 0
# for line in file:
# print(line, end="")
#
# if (line_counter != 0) and (line_counter % 20 == 0) :
# continuar = input("\n------\n\tPresione 'Enter' para continuar ->")
#
# line_counter += 1
#
# Parte 3
words_to_change = {
"Alice":"Bob",
"She":"He",
"Her":"His",
"Herself":"Himself",
"she":"he",
"her":"his",
"herself":"himself",
"Lewis":"Renato",
"Carroll":"Balcazar"
}
with open('alice.txt','r') as file:
line_counter = 0
for line in file:
for word in words_to_change:
line = line.replace(word, words_to_change[word]) # Esta linea reemplaza las palabras del direccionario por sus valores en el mismo
print(line, end="")
if (line_counter != 0) and (line_counter % 20 == 0) :
continuar = input("\n------\n\tPresione 'Enter' para continuar ->")
line_counter += 1
| 0 | 0 | 0 |
c99b149b9ae976820d9493ad0e46b286a2dabe91 | 2,033 | py | Python | pybitcoin/services/blockchain_info.py | sea212/pybitcoin | d34a1179ee23c5fe72bc3b555d7b86e533fcfd31 | [
"MIT"
] | 220 | 2015-06-26T08:04:53.000Z | 2021-12-31T02:03:03.000Z | pybitcoin/services/blockchain_info.py | sea212/pybitcoin | d34a1179ee23c5fe72bc3b555d7b86e533fcfd31 | [
"MIT"
] | 33 | 2015-06-25T22:15:51.000Z | 2021-03-12T21:17:19.000Z | pybitcoin/services/blockchain_info.py | sea212/pybitcoin | d34a1179ee23c5fe72bc3b555d7b86e533fcfd31 | [
"MIT"
] | 113 | 2015-07-24T13:04:56.000Z | 2021-12-27T23:56:59.000Z | # -*- coding: utf-8 -*-
"""
pybitcoin
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
import json, requests, traceback
from ..hash import reverse_hash
BLOCKCHAIN_API_BASE_URL = "https://blockchain.info"
from .blockchain_client import BlockchainClient
def get_unspents(address, blockchain_client=BlockchainInfoClient()):
""" Get the spendable transaction outputs, also known as UTXOs or
unspent transaction outputs.
"""
if not isinstance(blockchain_client, BlockchainInfoClient):
raise Exception('A BlockchainInfoClient object is required')
url = BLOCKCHAIN_API_BASE_URL + "/unspent?format=json&active=" + address
auth = blockchain_client.auth
if auth and len(auth) == 2 and isinstance(auth[0], str):
url = url + "&api_code=" + auth[0]
r = requests.get(url, auth=auth)
try:
unspents = r.json()["unspent_outputs"]
except ValueError, e:
raise Exception('Invalid response from blockchain.info.')
return format_unspents(unspents)
def broadcast_transaction(hex_tx, blockchain_client=BlockchainInfoClient()):
""" Dispatch a raw transaction to the network.
"""
url = BLOCKCHAIN_API_BASE_URL + '/pushtx'
payload = {'tx': hex_tx}
r = requests.post(url, data=payload, auth=blockchain_client.auth)
if 'submitted' in r.text.lower():
return {'success': True}
else:
raise Exception('Invalid response from blockchain.info.')
| 29.042857 | 76 | 0.651254 | # -*- coding: utf-8 -*-
"""
pybitcoin
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
import json, requests, traceback
from ..hash import reverse_hash
BLOCKCHAIN_API_BASE_URL = "https://blockchain.info"
from .blockchain_client import BlockchainClient
class BlockchainInfoClient(BlockchainClient):
    """blockchain.info API client; holds HTTP basic-auth credentials."""
    def __init__(self, api_key=None):
        self.type = 'blockchain.info'
        if api_key:
            # The API key is used as the basic-auth username, blank password.
            self.auth = (api_key, '')
        else:
            # No key: unauthenticated requests.
            self.auth = None
def format_unspents(unspents):
    """Normalize blockchain.info unspent-output records into pybitcoin's
    standard dictionary layout (hash reversed to the conventional byte order).
    """
    formatted = []
    for unspent in unspents:
        formatted.append({
            "transaction_hash": reverse_hash(unspent["tx_hash"]),
            "output_index": unspent["tx_output_n"],
            "value": unspent["value"],
            "script_hex": unspent["script"],
            "confirmations": unspent["confirmations"],
        })
    return formatted
def get_unspents(address, blockchain_client=BlockchainInfoClient()):
    """ Get the spendable transaction outputs, also known as UTXOs or
    unspent transaction outputs.
    """
    # NOTE(review): the default client is created once at import time and
    # shared across calls (mutable-default pitfall); benign here because the
    # client only holds auth credentials.
    if not isinstance(blockchain_client, BlockchainInfoClient):
        raise Exception('A BlockchainInfoClient object is required')
    url = BLOCKCHAIN_API_BASE_URL + "/unspent?format=json&active=" + address
    auth = blockchain_client.auth
    # An API key is also passed as a query parameter, not just via basic auth.
    if auth and len(auth) == 2 and isinstance(auth[0], str):
        url = url + "&api_code=" + auth[0]
    r = requests.get(url, auth=auth)
    try:
        unspents = r.json()["unspent_outputs"]
    except ValueError, e:
        # Python 2 except syntax; a non-JSON body means the API call failed.
        raise Exception('Invalid response from blockchain.info.')
    return format_unspents(unspents)
def broadcast_transaction(hex_tx, blockchain_client=BlockchainInfoClient()):
    """ Dispatch a raw transaction to the network.
    """
    url = BLOCKCHAIN_API_BASE_URL + '/pushtx'
    payload = {'tx': hex_tx}
    r = requests.post(url, data=payload, auth=blockchain_client.auth)
    # blockchain.info replies with a text page; success is detected by a
    # case-insensitive substring match rather than a structured status code.
    if 'submitted' in r.text.lower():
        return {'success': True}
    else:
        raise Exception('Invalid response from blockchain.info.')
| 422 | 24 | 72 |
8a5beb1c9fb75a319a48bf20f1e2551bce799064 | 4,753 | py | Python | backend/api/permissions/DocumentComment.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 18 | 2017-05-10T21:55:11.000Z | 2021-03-01T16:41:32.000Z | backend/api/permissions/DocumentComment.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 1,167 | 2017-03-04T00:18:43.000Z | 2022-03-03T22:31:51.000Z | backend/api/permissions/DocumentComment.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 48 | 2017-03-09T17:19:39.000Z | 2022-02-24T16:38:17.000Z | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import permissions
from api.models.Document import Document
from api.models.DocumentComment import DocumentComment
from api.models.DocumentStatus import DocumentStatus
class DocumentCommentPermissions(permissions.BasePermission):
    """Used by Viewset to check permissions for API requests"""

    @staticmethod
    def user_can_comment(user, document, privileged):
        """
        Check whether the user should have authority to add a comment.
        Government Users with abilities to review the documents should
        always have authority to add a comment, unless it's archived.
        Fuel Suppliers with abilities to add or submit can add a comment
        if the document is either in draft or submitted status.
        """
        if user.is_government_user and \
                user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW') and \
                document.status.status in ['Received', 'Submitted']:
            return True

        # Non-government users can never add privileged (internal) comments.
        if not user.is_government_user and not privileged and \
                document.status.status in ['Draft', 'Submitted']:
            return True

        return False

    @staticmethod
    def user_can_edit_comment(user, comment: DocumentComment):
        """
        Check whether the user should be able to edit their own comment.
        Only the author of the comment can update it, and only while the
        related document is still in Draft status.
        """
        current_status = comment.document.status.id

        if user.id == comment.create_user_id and \
                current_status == \
                DocumentStatus.objects.get_by_natural_key('Draft').id:
            return True

        return False

    def has_permission(self, request, view):
        """Check permissions when an object does not yet exist (POST)."""
        # Fall back to has_object_permission unless it's a POST
        if request.method != 'POST':
            return True

        # Both fields are needed to make a decision. Bug fix: the original
        # condition used `and`, so a request missing 'document' slipped past
        # this guard and crashed below with a KeyError (HTTP 500).
        if 'privileged_access' not in request.data or \
                'document' not in request.data:
            return False

        document = request.data['document']
        privileged_access = request.data['privileged_access']

        found = Document.objects.filter(id=document).first()

        if not found:
            return False

        # Only members of the authoring organization or government staff may
        # comment on the document.
        if found.create_user.organization != request.user.organization and \
                not request.user.is_government_user:
            return False

        return DocumentCommentPermissions.user_can_comment(
            request.user,
            found,
            privileged_access
        )

    def has_object_permission(self, request, view, obj):
        """Check permissions when an object does exist (PUT, GET)."""
        # Users can always see and edit their own comments
        if obj.create_user == request.user:
            return True

        # And see (but not edit) those from others in their own organization
        if obj.create_user.organization == request.user.organization and \
                request.method in permissions.SAFE_METHODS:
            return True

        # Government roles can always view comments
        # and can view or edit privileged comments with correct permission
        if request.user.is_government_user:
            # read
            if request.method in permissions.SAFE_METHODS:
                if obj.privileged_access:
                    return request.user.has_perm('DOCUMENTS_VIEW')
                return True

            # write
            if request.method not in permissions.SAFE_METHODS:
                if obj.privileged_access:
                    return request.user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW')
                return True

        # not authorized
        return False
| 36.844961 | 79 | 0.658531 | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import permissions
from api.models.Document import Document
from api.models.DocumentComment import DocumentComment
from api.models.DocumentStatus import DocumentStatus
class DocumentCommentPermissions(permissions.BasePermission):
    """Used by Viewset to check permissions for API requests"""

    @staticmethod
    def user_can_comment(user, document, privileged):
        """
        Check whether the user should have authority to add a comment.
        Government Users with abilities to review the documents should
        always have authority to add a comment, unless it's archived.
        Fuel Suppliers with abilities to add or submit can add a comment
        if the document is either in draft or submitted status.
        """
        # Government reviewers may comment while the document is under review.
        if user.is_government_user and \
                user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW') and \
                document.status.status in ['Received', 'Submitted']:
            return True
        # Fuel suppliers may only add non-privileged comments, and only
        # while the document is in draft or submitted status.
        if not user.is_government_user and not privileged and \
                document.status.status in ['Draft', 'Submitted']:
            return True
        return False

    @staticmethod
    def user_can_edit_comment(user, comment: DocumentComment):
        """
        Check whether the user should be able to edit their own comment.
        Conditions for now is simple: Only the user that made the comment
        can update their comment. And the status of the document has to be
        in draft state.
        """
        current_status = comment.document.status.id
        if user.id == comment.create_user_id and \
                current_status == \
                DocumentStatus.objects.get_by_natural_key('Draft').id:
            return True
        return False

    def has_permission(self, request, view):
        """Check permissions When an object does not yet exist (POST)"""
        # Fallback to has_object_permission unless it's a POST
        if request.method != 'POST':
            return True
        # Need this information to make a decision.
        # Bug fix: the original tested
        # `'privileged_access' not in request.data and 'document' in request.data`,
        # so a request missing both keys (or just 'document') fell through to
        # a KeyError below. Both keys are required to decide.
        if 'privileged_access' not in request.data or \
                'document' not in request.data:
            return False
        document = request.data['document']
        privileged_access = request.data['privileged_access']
        found = Document.objects.filter(id=document).first()
        if not found:
            return False
        # Non-government users may only comment on documents created by
        # their own organization.
        if found.create_user.organization != request.user.organization and \
                not request.user.is_government_user:
            return False
        return DocumentCommentPermissions.user_can_comment(
            request.user,
            found,
            privileged_access
        )

    def has_object_permission(self, request, view, obj):
        """Check permissions When an object does exist (PUT, GET)"""
        # Users can always see and edit their own comments
        if obj.create_user == request.user:
            return True
        # And see but not edit those from their others in their own
        # organization
        if obj.create_user.organization == request.user.organization and \
                request.method in permissions.SAFE_METHODS:
            return True
        # Government roles can always view comments
        # and can view or edit privileged comments with correct permission
        if request.user.is_government_user:
            # read
            if request.method in permissions.SAFE_METHODS:
                if obj.privileged_access:
                    return request.user.has_perm('DOCUMENTS_VIEW')
                return True
            # write
            if request.method not in permissions.SAFE_METHODS:
                if obj.privileged_access:
                    return request.user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW')
                return True
        # not authorized
        return False
| 0 | 0 | 0 |
b6fe6bf9016e3bbbc3835e03e152c469f8b21ec9 | 1,382 | py | Python | prostagma/techniques/grid_search.py | Skopos-team/Prostagma | a6d0a0df421cd3740d3761486dc15d56e81dbc02 | [
"Apache-2.0"
] | null | null | null | prostagma/techniques/grid_search.py | Skopos-team/Prostagma | a6d0a0df421cd3740d3761486dc15d56e81dbc02 | [
"Apache-2.0"
] | null | null | null | prostagma/techniques/grid_search.py | Skopos-team/Prostagma | a6d0a0df421cd3740d3761486dc15d56e81dbc02 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from itertools import product
from prostagma.techniques.technique import SearchTechnique
from prostagma.performances.cross_validation import CrossValidation
from sklearn.grid_search import ParameterGrid
class GridSearch(SearchTechnique):
    """
    The class implement the simple Grid Search
    algorithm to find the best parameters using
    Cross Validation.
    """
    def fit(self, X_train, y_train):
        """
        The method computes a score for each combination of
        hyperparameters using the performance validator
        @args
            X_train : numpy array -> features
            y_train : numpy array -> labels
        @return
            all_scores : numpy array -> one (mean, std) row per
            hyperparameter combination, obtained using the
            performance validator
        """
        all_scores = []
        grid = ParameterGrid(self.parameters)
        for params in grid:
            print("Validating the model with: ", params)
            scores = self.performance_validator.fit(X_train, y_train, self.model, params)
            all_scores.append((scores.mean(), scores.std()))
            # Track the best (highest mean) score and its parameters.
            if scores.mean() > self.best_score[0]:
                self.best_score = (scores.mean(), scores.std())
                self.best_param = params
        # Bug fix: the original did `np.asarray(scores)`, which returned only
        # the fold scores of the LAST combination and threw away every
        # accumulated (mean, std) entry.
        all_scores = np.asarray(all_scores)
        return all_scores
| 28.791667 | 80 | 0.743849 | import numpy as np
from itertools import product
from prostagma.techniques.technique import SearchTechnique
from prostagma.performances.cross_validation import CrossValidation
from sklearn.grid_search import ParameterGrid
class GridSearch(SearchTechnique):
    """
    The class implement the simple Grid Search
    algorithm to find the best parameters using
    Cross Validation.
    """
    def __init__(self, parameters, model,
        performance_validator=None):
        # Bug fix: the original default `performance_validator=CrossValidation()`
        # was evaluated once at class-definition time, so every GridSearch
        # instance silently shared the same validator object (mutable default
        # argument anti-pattern). Build a fresh one per instance instead.
        if performance_validator is None:
            performance_validator = CrossValidation()
        super(GridSearch, self).__init__(parameters=parameters,
            model=model, performance_validator=performance_validator)
    def fit(self, X_train, y_train):
        """
        The method computes a score for each combination of
        hyperparameters using the performance validator
        @args
            X_train : numpy array -> features
            y_train : numpy array -> labels
        @return
            all_scores : numpy array -> one (mean, std) row per
            hyperparameter combination, obtained using the
            performance validator
        """
        all_scores = []
        grid = ParameterGrid(self.parameters)
        for params in grid:
            print("Validating the model with: ", params)
            scores = self.performance_validator.fit(X_train, y_train, self.model, params)
            all_scores.append((scores.mean(), scores.std()))
            # Track the best (highest mean) score and its parameters.
            if scores.mean() > self.best_score[0]:
                self.best_score = (scores.mean(), scores.std())
                self.best_param = params
        # Bug fix: the original did `np.asarray(scores)`, which returned only
        # the fold scores of the LAST combination and threw away every
        # accumulated (mean, std) entry.
        all_scores = np.asarray(all_scores)
        return all_scores
| 182 | 0 | 23 |
93f196915a274be46bde4b8b22e3b226f584e0d0 | 1,335 | py | Python | am_utils/omero.py | amedyukhina/am_utils | 46cb9034dcbf556c4d7c8027170aace5c5d4e36a | [
"Apache-2.0"
] | 1 | 2022-03-25T17:44:33.000Z | 2022-03-25T17:44:33.000Z | am_utils/omero.py | amedyukhina/am_utils | 46cb9034dcbf556c4d7c8027170aace5c5d4e36a | [
"Apache-2.0"
] | null | null | null | am_utils/omero.py | amedyukhina/am_utils | 46cb9034dcbf556c4d7c8027170aace5c5d4e36a | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
from omero.gateway import BlitzGateway
from am_utils.utils import imsave
| 29.021739 | 93 | 0.629963 | import os
import numpy as np
from omero.gateway import BlitzGateway
from am_utils.utils import imsave
def connect(USER, PASSWD, HOST, PORT):
    """Open a BlitzGateway connection to an OMERO server and mark it secure."""
    gateway = BlitzGateway(USER, PASSWD, host=HOST, port=PORT)
    gateway.connect()
    gateway.setSecure(True)
    return gateway
def load_image(img, conn):
    """Fetch every plane of an OMERO image and stack them into one array.

    Planes are requested for every (z, c, t) combination and the result is
    returned with axes ordered (T, C, Z) followed by the per-plane axes.
    """
    image = conn.getObject('Image', img.getId())
    size_z = image.getSizeZ()
    size_c = image.getSizeC()
    size_t = image.getSizeT()
    pixels = image.getPrimaryPixels()
    # Same nesting order as the original triple loop: z outer, c, t inner.
    zct_list = [(z, c, t)
                for z in range(size_z)
                for c in range(size_c)
                for t in range(size_t)]
    img_pix = np.array([plane for plane in pixels.getPlanes(zct_list)])
    img_pix = img_pix.reshape((size_z, size_c, size_t) + img_pix.shape[1:])
    # Reorder leading axes from (Z, C, T) to (T, C, Z).
    img_pix = img_pix.transpose(2, 1, 0, 3, 4)
    return img_pix
def download_project(project_id, conn, outputdir):
    # Walk every dataset and image inside the OMERO project and save each
    # image under <outputdir>/<project>/<dataset>/<image>, skipping paths
    # that already exist on disk.
    project = conn.getObject('Project', project_id)
    for ds in project.listChildren():
        print(ds.getName())
        for image in ds.listChildren():
            fnout = os.path.join(outputdir, project.getName(), ds.getName(), image.getName())
            if not os.path.exists(fnout):
                img = load_image(image, conn)
                imsave(fnout, img)
| 1,159 | 0 | 69 |
d9dc4062252847333991d9770c3290d474222423 | 1,850 | py | Python | src/evaluation/eval_pawsx.py | cr1m5onk1ng/text_similarity | 2123621bf153683b35e9433835237812605bd42f | [
"Apache-2.0"
] | 1 | 2021-07-08T16:14:46.000Z | 2021-07-08T16:14:46.000Z | src/evaluation/eval_pawsx.py | cr1m5onk1ng/text_similarity | 2123621bf153683b35e9433835237812605bd42f | [
"Apache-2.0"
] | null | null | null | src/evaluation/eval_pawsx.py | cr1m5onk1ng/text_similarity | 2123621bf153683b35e9433835237812605bd42f | [
"Apache-2.0"
] | null | null | null | from src.utils import utils
from src.configurations import classifier_config as config
from src.configurations import embeddings_config as embeddings_config
from src.dataset.dataset import *
from src.modules.contextual_embedder import ContextualEmbedder
from src.evaluation.evaluators import ParaphraseEvaluator
from src.models.modeling import SiameseSentenceEmbedder, MBERTClassifier
from src.utils.metrics import SimilarityAccuracyMeter, SimilarityAveragePrecisionMeter, SimilarityF1Meter, AccuracyMeter
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
MODEL_NAMES = {
"mbert_sense": "training_mbert_paws_sense_avg_features_5epochs",
"siamese_online_contrastive": "training_siamese_paws_sense_online_contrastive_combine_5epochs",
"siamese_oc_cls": "training_siamese_paws_sense_online_contrastive_cls_pooling_5epochs",
"no_sense": "training_siamese_paws_no_sense_online_contrastive_8epochs",
"no_sense_softmax": "training_siamese_paws_no_sense_softmax_5epochs"
}
MODEL_NAME = MODEL_NAMES["mbert_sense"]
PRETRAINED_PATH = f"../training/trained_models/{MODEL_NAME}"
valid_data_loader = utils.load_file(f"../dataset/cached/pawsx_test_all_languages_16")
#metrics = {"validation": [SimilarityAveragePrecisionMeter, SimilarityAccuracyMeter]}
metrics = {"validation": [AccuracyMeter]}
"""
model = SiameseSentenceEmbedder(
train_model = False,
use_sense_embeddings=False
)
"""
model = MBERTClassifier(
train_model = False,
use_sense_embeddings=True,
senses_as_features=True
)
model.load_pretrained(PRETRAINED_PATH)
#model_2.load_pretrained(PRETRAINED_PATH_2)
evaluator = ParaphraseEvaluator(
model = model,
data_loader = valid_data_loader,
device = config.DEVICE,
metrics = metrics,
fp16=True,
verbose=True
)
evaluator.evaluate()
| 28.461538 | 120 | 0.808649 | from src.utils import utils
from src.configurations import classifier_config as config
from src.configurations import embeddings_config as embeddings_config
from src.dataset.dataset import *
from src.modules.contextual_embedder import ContextualEmbedder
from src.evaluation.evaluators import ParaphraseEvaluator
from src.models.modeling import SiameseSentenceEmbedder, MBERTClassifier
from src.utils.metrics import SimilarityAccuracyMeter, SimilarityAveragePrecisionMeter, SimilarityF1Meter, AccuracyMeter
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
MODEL_NAMES = {
"mbert_sense": "training_mbert_paws_sense_avg_features_5epochs",
"siamese_online_contrastive": "training_siamese_paws_sense_online_contrastive_combine_5epochs",
"siamese_oc_cls": "training_siamese_paws_sense_online_contrastive_cls_pooling_5epochs",
"no_sense": "training_siamese_paws_no_sense_online_contrastive_8epochs",
"no_sense_softmax": "training_siamese_paws_no_sense_softmax_5epochs"
}
MODEL_NAME = MODEL_NAMES["mbert_sense"]
PRETRAINED_PATH = f"../training/trained_models/{MODEL_NAME}"
valid_data_loader = utils.load_file(f"../dataset/cached/pawsx_test_all_languages_16")
#metrics = {"validation": [SimilarityAveragePrecisionMeter, SimilarityAccuracyMeter]}
metrics = {"validation": [AccuracyMeter]}
"""
model = SiameseSentenceEmbedder(
train_model = False,
use_sense_embeddings=False
)
"""
model = MBERTClassifier(
train_model = False,
use_sense_embeddings=True,
senses_as_features=True
)
model.load_pretrained(PRETRAINED_PATH)
#model_2.load_pretrained(PRETRAINED_PATH_2)
evaluator = ParaphraseEvaluator(
model = model,
data_loader = valid_data_loader,
device = config.DEVICE,
metrics = metrics,
fp16=True,
verbose=True
)
evaluator.evaluate()
| 0 | 0 | 0 |
438e4a300e3fcc923820a2ab88c9dec20230e055 | 2,092 | py | Python | OLSS/news20/ParamChoice/get_feature_mapping_and_appearance.py | zsdlightning/OLSS | 7fc5d8621adfcaab61defb61719b82aeb05cc1b3 | [
"MIT"
] | 1 | 2018-06-29T10:02:29.000Z | 2018-06-29T10:02:29.000Z | OLSS/news20/get_feature_mapping_and_appearance.py | zsdlightning/OLSS | 7fc5d8621adfcaab61defb61719b82aeb05cc1b3 | [
"MIT"
] | null | null | null | OLSS/news20/get_feature_mapping_and_appearance.py | zsdlightning/OLSS | 7fc5d8621adfcaab61defb61719b82aeb05cc1b3 | [
"MIT"
] | null | null | null | #!/bin/python
import os
import sys
import numpy as np
''' input: VW training file '''
''' output: feature_to_id.txt, feature_appearence_pos.npy, feature_appearence_neg.npy '''
''' note for data with sample weight feature_off = 3, otherwise feature_off = 2'''
feature_off = 3
#calculate the appearche of each features in the training data, for postive and negative samples
#in the end, appending the number of pos. & neg. samples
if __name__ == '__main__':
    # Python 2 script: build a feature-name -> integer-id mapping from a
    # Vowpal Wabbit training file, dump it, then count per-feature
    # appearances via calc_per_feature_appearence.
    if len(sys.argv) != 2:
        print 'Usage: %s <VW training file>'%sys.argv[0]
        sys.exit(1)
    #VW format
    features = {}
    ct = 0
    with open(sys.argv[1],'r') as f:
        for line in f:
            items = line.strip().split(' ')
            ct = ct + 1
            # Progress logging every 10k lines.
            if ct%10000 == 0:
                print ct
            # Feature tokens start at index feature_off; each is "name:value".
            for item in items[feature_off:]:
                key = item.split(':')[0]
                if key not in features:
                    # Assign ids sequentially, in first-seen order.
                    id = len(features)
                    features[ key ] = id
    # Dump the mapping as tab-separated "name<TAB>id" lines.
    with open('feature_to_id.txt', 'w') as f:
        for key in features:
            id = features[key]
            print >>f, '%s\t%d'%(key, id)
    calc_per_feature_appearence(len(features), features, sys.argv[1])
| 29.464789 | 97 | 0.513384 | #!/bin/python
import os
import sys
import numpy as np
''' input: VW training file '''
''' output: feature_to_id.txt, feature_appearence_pos.npy, feature_appearence_neg.npy '''
''' note for data with sample weight feature_off = 3, otherwise feature_off = 2'''
feature_off = 3
#calculate the appearche of each features in the training data, for postive and negative samples
#in the end, appending the number of pos. & neg. samples
def calc_per_feature_appearence(d, fea2id, training_file):
    # Count, for positive and negative samples separately, how many samples
    # each feature id appears in. Slot d (one past the last feature id)
    # holds the total number of samples of that class. Results are written
    # to feature_appearence_pos.npy / feature_appearence_neg.npy.
    #including intercept
    #pos
    res = np.zeros(d+1)
    #neg
    res2 = np.zeros(d+1)
    with open(training_file, 'r') as f:
        ct = 0
        for line in f:
            ct = ct + 1
            items = line.strip().split(' ')
            label = int(items[0])
            # Tally overall class counts in the extra slot d.
            if label == 1:
                res[d] = res[d] + 1
            else:
                res2[d] = res2[d] + 1
            for item in items[feature_off:]:
                name = item.split(':')[0]
                id = fea2id[name]
                if label == 1:
                    res[ id ] = res[ id ] + 1
                else:
                    res2[ id ] = res2[ id ] + 1
            # Progress logging every 10k lines.
            if ct%10000 == 0:
                print ct
    np.save('feature_appearence_pos.npy',res)
    np.save('feature_appearence_neg.npy',res2)
if __name__ == '__main__':
    # Python 2 script: build a feature-name -> integer-id mapping from a
    # Vowpal Wabbit training file, dump it, then count per-feature
    # appearances via calc_per_feature_appearence.
    if len(sys.argv) != 2:
        print 'Usage: %s <VW training file>'%sys.argv[0]
        sys.exit(1)
    #VW format
    features = {}
    ct = 0
    with open(sys.argv[1],'r') as f:
        for line in f:
            items = line.strip().split(' ')
            ct = ct + 1
            # Progress logging every 10k lines.
            if ct%10000 == 0:
                print ct
            # Feature tokens start at index feature_off; each is "name:value".
            for item in items[feature_off:]:
                key = item.split(':')[0]
                if key not in features:
                    # Assign ids sequentially, in first-seen order.
                    id = len(features)
                    features[ key ] = id
    # Dump the mapping as tab-separated "name<TAB>id" lines.
    with open('feature_to_id.txt', 'w') as f:
        for key in features:
            id = features[key]
            print >>f, '%s\t%d'%(key, id)
    calc_per_feature_appearence(len(features), features, sys.argv[1])
| 846 | 0 | 22 |
b50ce51ba7bcd538c525021a17f50669ac182a53 | 6,893 | py | Python | contracts/tests/test_ecverify.py | younseunghyun/microraiden | 436562a60521c0b321edec822980807c3ac6bca4 | [
"MIT"
] | null | null | null | contracts/tests/test_ecverify.py | younseunghyun/microraiden | 436562a60521c0b321edec822980807c3ac6bca4 | [
"MIT"
] | null | null | null | contracts/tests/test_ecverify.py | younseunghyun/microraiden | 436562a60521c0b321edec822980807c3ac6bca4 | [
"MIT"
] | 1 | 2018-03-10T09:19:02.000Z | 2018-03-10T09:19:02.000Z | import pytest
from ethereum import tester
from utils import sign
from eth_utils import encode_hex, is_same_address
from tests.utils import balance_proof_hash, closing_message_hash
from tests.fixtures import (
owner_index,
owner,
contract_params,
create_contract,
get_token_contract,
get_accounts,
create_accounts
)
from tests.fixtures_uraiden import (
token_contract,
token_instance,
get_uraiden_contract,
uraiden_contract,
uraiden_instance,
delegate_contract,
delegate_instance,
)
@pytest.fixture
| 30.232456 | 155 | 0.719716 | import pytest
from ethereum import tester
from utils import sign
from eth_utils import encode_hex, is_same_address
from tests.utils import balance_proof_hash, closing_message_hash
from tests.fixtures import (
owner_index,
owner,
contract_params,
create_contract,
get_token_contract,
get_accounts,
create_accounts
)
from tests.fixtures_uraiden import (
token_contract,
token_instance,
get_uraiden_contract,
uraiden_contract,
uraiden_instance,
delegate_contract,
delegate_instance,
)
@pytest.fixture
def ecverify_test_contract(chain, create_contract):
    """Provide an ECVerifyTest contract instance built via create_contract."""
    factory = chain.provider.get_contract_factory('ECVerifyTest')
    return create_contract(factory, [])
def test_ecrecover_output(web3, ecverify_test_contract):
    """verify_ecrecover_output: fails on recovery id 0, succeeds with v=28."""
    (A, B) = web3.eth.accounts[:2]
    block = 4804175
    balance = 22000000000000000000
    balance_message_hash = balance_proof_hash(B, block, balance, ecverify_test_contract.address)
    # Sign with tester key k0; the assert below pins k0 to account A.
    balance_msg_sig, addr = sign.check(balance_message_hash, tester.k0)
    assert is_same_address(addr, A)
    # Hard-coded signature components (r, s) and recovery id v.
    r = bytes.fromhex('12d99ba7bd20ac17bac65bfd646146c1ddbeb607519db6e7935684334d891ed6')
    s = bytes.fromhex('5d4ea3a13697c1d506f7bdb8cd672b944e2053d6d6bd87d4aa512fdc29ed9ae4')
    v = 28
    # Passing 0 as the recovery id must make the contract call fail.
    with pytest.raises(tester.TransactionFailed):
        ecverify_test_contract.call().verify_ecrecover_output(balance_message_hash, r, s, 0)
    # We have to simulate mining because ecrecover consumes a lot of gas for precompiled contracts
    # on private chains.
    web3.testing.mine(30)
    ecverify_test_contract.call().verify_ecrecover_output(
        balance_message_hash,
        r,
        s,
        v
    )
def test_sign(web3, ecverify_test_contract):
    """verify(): rejects malformed signature lengths, recovers the right
    signer for a valid signature, and verifyEthSignedTypedData matches a
    known hard-coded signature."""
    (A, B) = web3.eth.accounts[:2]
    block = 4804175
    balance = 22000000000000000000
    balance_message_hash = balance_proof_hash(B, block, balance, ecverify_test_contract.address)
    balance_message_hash2 = balance_proof_hash(B, block, balance + 1000, ecverify_test_contract.address)
    # k0 signs as account A; k1 signs as account B (the "wrong" signer).
    signed_message, addr = sign.check(balance_message_hash, tester.k0)
    signed_message_false, addr1 = sign.check(balance_message_hash, tester.k1)
    assert is_same_address(addr, A)
    assert is_same_address(addr1, B)
    assert len(signed_message) == 65
    assert len(signed_message_false) == 65
    # Signatures of length 0, 64 and 66 bytes must all be rejected
    # (a valid signature is exactly 65 bytes).
    with pytest.raises(tester.TransactionFailed):
        verified_address = ecverify_test_contract.call().verify(
            balance_message_hash,
            encode_hex(bytearray())
        )
    web3.testing.mine(30)
    with pytest.raises(tester.TransactionFailed):
        verified_address = ecverify_test_contract.call().verify(
            balance_message_hash,
            encode_hex(bytearray(64))
        )
    web3.testing.mine(30)
    with pytest.raises(tester.TransactionFailed):
        verified_address = ecverify_test_contract.call().verify(
            balance_message_hash,
            encode_hex(bytearray(66))
        )
    web3.testing.mine(30)
    # Signature over a different hash must NOT recover A.
    verified_address = ecverify_test_contract.call().verify(
        balance_message_hash2,
        signed_message
    )
    assert not is_same_address(verified_address, A)
    # Correct hash + correct signature recovers A.
    verified_address = ecverify_test_contract.call().verify(
        balance_message_hash,
        signed_message
    )
    assert is_same_address(verified_address, A)
    # Signature made with B's key recovers B, not A.
    verified_address_false = ecverify_test_contract.call().verify(
        balance_message_hash,
        signed_message_false
    )
    assert not is_same_address(verified_address_false, A)
    assert is_same_address(verified_address_false, B)
    # Known-answer check for the typed-data variant.
    signer = '0x5601Ea8445A5d96EEeBF89A67C4199FbB7a43Fbb'
    value = 1000
    value2 = 2000
    _address = '0x5601Ea8445A5d96EEeBF89A67C4199FbB7a43Fbb'
    signed_message = '0x0adc437691e266072e9aa1763329062a6b2fa1d7f94034f1b1e691218fe9fd285f4f20132fa00230f591571a3575456bb382040e02e93ff0f32544907748a9821c'
    signed_message = bytes.fromhex(signed_message[2:])
    verified_address = ecverify_test_contract.call().verifyEthSignedTypedData(
        _address,
        value,
        value2,
        signed_message
    )
    assert is_same_address(verified_address, signer)
def test_extract_balance_proof_signature(get_accounts, token_instance, uraiden_instance):
    """extractBalanceProofSignature recovers the signer only for the exact
    signed (receiver, block, balance) triple; any altered field must not
    recover the same address."""
    (A, B) = get_accounts(2)
    token = token_instance
    uraiden = uraiden_instance
    receiver = '0x5601Ea8445A5d96EEeBF89A67C4199FbB7a43Fbb'
    block = 4804175
    balance = 22000000000000000000
    message_hash = balance_proof_hash(receiver, block, balance, uraiden.address)
    # k2 signs as account A (pinned by the assert below).
    balance_msg_sig, signer = sign.check(message_hash, tester.k2)
    assert is_same_address(signer, A)
    # Exact triple -> recovers the signer.
    signature_address = uraiden.call().extractBalanceProofSignature(
        receiver,
        block,
        balance,
        balance_msg_sig
    )
    assert is_same_address(signature_address, signer)
    # Wrong receiver
    signature_address = uraiden.call().extractBalanceProofSignature(
        B,
        block,
        balance,
        balance_msg_sig
    )
    assert not is_same_address(signature_address, signer)
    # Wrong block
    signature_address = uraiden.call().extractBalanceProofSignature(
        receiver,
        10,
        balance,
        balance_msg_sig
    )
    assert not is_same_address(signature_address, signer)
    # Wrong balance
    signature_address = uraiden.call().extractBalanceProofSignature(
        receiver,
        block,
        20,
        balance_msg_sig
    )
    assert not is_same_address(signature_address, signer)
def test_extract_closing_signature(get_accounts, token_instance, uraiden_instance):
    """extractClosingSignature recovers the signer only for the exact signed
    (sender, block, balance) triple; any altered field must not recover the
    same address."""
    (A, B) = get_accounts(2)
    token = token_instance
    uraiden = uraiden_instance
    sender = '0x5601Ea8445A5d96EEeBF89A67C4199FbB7a43Fbb'
    block = 4804175
    balance = 22000000000000000000
    message_hash = closing_message_hash(sender, block, balance, uraiden.address)
    # k2 signs as account A (pinned by the assert below).
    balance_msg_sig, signer = sign.check(message_hash, tester.k2)
    assert is_same_address(signer, A)
    # Exact triple -> recovers the signer.
    signature_address = uraiden.call().extractClosingSignature(
        sender,
        block,
        balance,
        balance_msg_sig
    )
    assert is_same_address(signature_address, signer)
    # Wrong sender
    signature_address = uraiden.call().extractClosingSignature(
        B,
        block,
        balance,
        balance_msg_sig
    )
    assert not is_same_address(signature_address, signer)
    # Wrong block
    signature_address = uraiden.call().extractClosingSignature(
        sender,
        10,
        balance,
        balance_msg_sig
    )
    assert not is_same_address(signature_address, signer)
    # Wrong balance
    signature_address = uraiden.call().extractClosingSignature(
        sender,
        block,
        20,
        balance_msg_sig
    )
    assert not is_same_address(signature_address, signer)
| 6,219 | 0 | 114 |
247b0f2580310cad3e1f381e524fdce487e2bc44 | 212 | py | Python | students/K33422/Larionova Anastasia/lab 1/task1_client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 4 | 2020-09-03T15:41:42.000Z | 2021-12-24T15:28:20.000Z | students/K33422/Larionova Anastasia/lab 1/task1_client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 48 | 2020-09-13T20:22:42.000Z | 2021-04-30T11:13:30.000Z | students/K33422/Larionova Anastasia/lab 1/task1_client.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 69 | 2020-09-06T10:32:37.000Z | 2021-11-28T18:13:17.000Z | import socket
# Simple TCP client: connect to the server on localhost:7777, send one
# UTF-8 greeting and print the server's reply (up to 1024 bytes).
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 7777))
sock.send('Hello, server! :)'.encode('utf-8'))
d = sock.recv(1024)
print(d.decode('utf-8'))
sock.close() | 21.2 | 56 | 0.70283 | import socket
# Simple TCP client: connect to the server on localhost:7777, send one
# UTF-8 greeting and print the server's reply (up to 1024 bytes).
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', 7777))
sock.send('Hello, server! :)'.encode('utf-8'))
d = sock.recv(1024)
print(d.decode('utf-8'))
sock.close() | 0 | 0 | 0 |
64b0ec99725da4d831528c9fb82671f8b6e998ee | 2,406 | py | Python | pybank/main.py | sadafmehds/Python-Challenge | 9244220d58879e4d72004b0dddcebe31f88f47c3 | [
"MIT"
] | null | null | null | pybank/main.py | sadafmehds/Python-Challenge | 9244220d58879e4d72004b0dddcebe31f88f47c3 | [
"MIT"
] | null | null | null | pybank/main.py | sadafmehds/Python-Challenge | 9244220d58879e4d72004b0dddcebe31f88f47c3 | [
"MIT"
] | null | null | null | import os
import csv
#maxi function identified to get the maximum number in a list and give its index in the list together as a list
#min function identified to get the minimum number in a list and give its index in the list together as a list
#initiating lists
# PyBank: read monthly budget data and print a summary financial analysis.
list_date=[]
list_rev=[]
list_change=[]
csv_path=os.path.join("..","Resources","budget_data_1.csv")
with open(csv_path,newline="") as csvfile:
    csv_reader=csv.reader(csvfile,delimiter=",")
    next(csv_reader) # skip title line
    csv_reader = list(csv_reader) # turn the reader into a list
    count = len(csv_reader) # since we converted reader to list, now we can use len method to get total months from length of csv observations
    # compute total by summing every second col of each row
    total = sum(int(mylist[1]) for mylist in csv_reader)
#turn each column into its own separate list, store into date and revenue lists (list_date and list_rev)
for i in csv_reader:
    list_date.append(str(i[0]))
    list_rev.append(float(i[1]))
#create a difference list as list_change by differencing the revenues in list_rev
for i in range(0,len(list_rev)-1):
    list_change.append(list_rev[i+1]-list_rev[i])
#compute average change from the computed differences in list_change
# NOTE(review): this loop recomputes the identical average on every
# iteration; a single division outside the loop would suffice.
for j in list_change:
    average_diff=sum(list_change)/len(list_change)
print(" Financial Analysis\n-------------------------------------------")
print("Total Months: " + str(count))
print("Total Revenue: " + str(total))
print("Average Revenue Change: "+str(average_diff))
#predefined maxi,mini functions return max and min values and the index of values in the same list
#slice dates list (list_date) via the returned index
#get max/min values via index 1 of both maxi/min functions
# NOTE(review): list_change[i] is the change between month i and i+1, so
# indexing list_date with the change index reports the earlier month —
# confirm which month should be attributed to the change.
print("Greatest Increase in Revenue: "+str(list_date[maxi(list_change)[0]])+" ("+ str(maxi(list_change)[1])+")")
print("Greatest Decrease in Revenue: "+str(list_date[min(list_change)[0]])+" ("+ str(min(list_change)[1])+")")
| 41.482759 | 149 | 0.669576 | import os
import csv
#maxi function identified to get the maximum number in a list and give its index in the list together as a list
def maxi(any_list):
    # Return [index, value] of the largest element in any_list.
    # Bug fix: the original compared every element against the fixed first
    # element (s was never updated), so it returned the LAST element larger
    # than the first — and raised UnboundLocalError when the first element
    # was already the maximum.
    best_index = 0
    best_value = any_list[0]
    for index, value in enumerate(any_list):
        if value > best_value:
            best_value = value
            best_index = index
    return([best_index, best_value])
#min function identified to get the minimum number in a list and give its index in the list together as a list
def min(any_list):
    # Return [index, value] of the smallest element in any_list.
    # NOTE: shadows the builtin min() module-wide; the name is kept because
    # the script below calls it. Bug fix: the original compared every
    # element against the fixed first element (s was never updated) and
    # raised UnboundLocalError when the first element was already the
    # minimum.
    best_index = 0
    best_value = any_list[0]
    for index, value in enumerate(any_list):
        if value < best_value:
            best_value = value
            best_index = index
    return([best_index, best_value])
#initiating lists
# PyBank: read monthly budget data and print a summary financial analysis.
list_date=[]
list_rev=[]
list_change=[]
csv_path=os.path.join("..","Resources","budget_data_1.csv")
with open(csv_path,newline="") as csvfile:
    csv_reader=csv.reader(csvfile,delimiter=",")
    next(csv_reader) # skip title line
    csv_reader = list(csv_reader) # turn the reader into a list
    count = len(csv_reader) # since we converted reader to list, now we can use len method to get total months from length of csv observations
    # compute total by summing every second col of each row
    total = sum(int(mylist[1]) for mylist in csv_reader)
#turn each column into its own separate list, store into date and revenue lists (list_date and list_rev)
for i in csv_reader:
    list_date.append(str(i[0]))
    list_rev.append(float(i[1]))
#create a difference list as list_change by differencing the revenues in list_rev
for i in range(0,len(list_rev)-1):
    list_change.append(list_rev[i+1]-list_rev[i])
#compute average change from the computed differences in list_change
# NOTE(review): this loop recomputes the identical average on every
# iteration; a single division outside the loop would suffice.
for j in list_change:
    average_diff=sum(list_change)/len(list_change)
print(" Financial Analysis\n-------------------------------------------")
print("Total Months: " + str(count))
print("Total Revenue: " + str(total))
print("Average Revenue Change: "+str(average_diff))
#predefined maxi,mini functions return max and min values and the index of values in the same list
#slice dates list (list_date) via the returned index
#get max/min values via index 1 of both maxi/min functions
# NOTE(review): list_change[i] is the change between month i and i+1, so
# indexing list_date with the change index reports the earlier month —
# confirm which month should be attributed to the change.
print("Greatest Increase in Revenue: "+str(list_date[maxi(list_change)[0]])+" ("+ str(maxi(list_change)[1])+")")
print("Greatest Decrease in Revenue: "+str(list_date[min(list_change)[0]])+" ("+ str(min(list_change)[1])+")")
| 287 | 0 | 44 |
e6f6247ca2fde79aa04c30ca02345aacf5ecbb2b | 5,324 | py | Python | forecite/topic_identification/generate_dataset.py | allenai/ForeCite | 4c41203d8692de40f018819a78c4fc56d2cf2d7b | [
"Apache-2.0"
] | 24 | 2020-06-15T09:28:16.000Z | 2021-08-09T14:06:43.000Z | forecite/topic_identification/generate_dataset.py | allenai/ForeCite | 4c41203d8692de40f018819a78c4fc56d2cf2d7b | [
"Apache-2.0"
] | 2 | 2020-09-08T16:28:04.000Z | 2021-01-25T17:25:42.000Z | forecite/topic_identification/generate_dataset.py | allenai/ForeCite | 4c41203d8692de40f018819a78c4fc56d2cf2d7b | [
"Apache-2.0"
] | 5 | 2020-06-21T07:20:57.000Z | 2020-11-26T07:35:34.000Z | from typing import List, Dict
import json
import spacy
import pickle
import multiprocessing
import argparse
from scispacy.abbreviation import AbbreviationDetector
from tqdm import tqdm
from collections import defaultdict
from forecite.consts import *
from forecite import s2_utils
def compute_noun_phrases_worker(input_text: str) -> List:
    """Returns the noun phrases in a string"""
    # Only the first 1,000,000 characters of the text are parsed.
    parsed = s2_utils.nlp_md(input_text[:1000000])
    return [chunk.text.lower() for chunk in parsed.noun_chunks]
def get_date_key_from_arxiv_id(arxiv_id: str):
    """
    Return a date key of the form <year>_<month> from an arxiv id.

    Handles both old-style ids ("cs/9901123") and new-style ids
    ("0704.0001", "2003.12345"): the first two digits of the numeric part
    are the two-digit year and the next two are the month.
    """
    if "/" in arxiv_id:
        # Old-style ids prefix an archive name, e.g. "cs/9901123".
        arxiv_id = arxiv_id.split("/")[1]
    short_year = arxiv_id[0:2]
    month = arxiv_id[2:4]
    # Idiom: str.startswith accepts a tuple of prefixes, replacing the
    # original three-way `or` chain. Two-digit years 00-29 map to the
    # 2000s; everything else to the 1900s.
    if short_year.startswith(("0", "1", "2")):
        year = "20" + short_year
    else:
        year = "19" + short_year
    return year + "_" + month
def generate_dataset_no_refs_arxiv_cs(num_processes: int = 1):
    """
    Function to generate the full topic extraction dataset for arxiv cs with references clipped

    Each intermediate artifact is cached to disk under NO_REFS_ARXIV_CS_DATA_ROOT
    and reloaded on subsequent runs, so the function is resumable stage by stage.

    Args:
        num_processes: number of worker processes passed to
            s2_utils.full_data_collection_parallel.

    NOTE(review): `os` is not imported in this module's visible import block --
    presumably it arrives via `from forecite.consts import *`; confirm.
    """
    # Stage 0: make sure the output directory exists.
    if not os.path.exists(NO_REFS_ARXIV_CS_DATA_ROOT):
        print("Creating directory at {}".format(NO_REFS_ARXIV_CS_DATA_ROOT))
        os.mkdir(NO_REFS_ARXIV_CS_DATA_ROOT)
    # Stage 1: collect (or reload) the set of arxiv cs paper ids.
    if not os.path.exists(NO_REFS_ARXIV_CS_IDS_PATH):
        print("Querying for all arxiv cs ids...")
        arxiv_ids = s2_utils.get_all_arxiv_cs_ids()
        print("Writing arxiv cs ids to {}".format(NO_REFS_ARXIV_CS_IDS_PATH))
        with open(NO_REFS_ARXIV_CS_IDS_PATH, "w") as _arxiv_ids_json_file:
            json.dump(list(arxiv_ids), _arxiv_ids_json_file)
    else:
        print("Loading arxiv cs ids from {}".format(NO_REFS_ARXIV_CS_IDS_PATH))
        with open(NO_REFS_ARXIV_CS_IDS_PATH) as _arxiv_ids_json_file:
            arxiv_ids = json.load(_arxiv_ids_json_file)
    # Stage 2: map each arxiv id to its Semantic Scholar id (or reload the map).
    if not os.path.exists(NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH):
        print("Generating arxiv to s2 mapping...")
        arxiv_to_s2_mapping = s2_utils.get_arxiv_to_s2_id_mapping(arxiv_ids, [])
        print(
            "Writing arxiv to s2 mapping to {}".format(
                NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH
            )
        )
        with open(
            NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH, "w"
        ) as _arxiv_to_s2_mapping_json_file:
            json.dump(arxiv_to_s2_mapping, _arxiv_to_s2_mapping_json_file)
    else:
        print(
            "Loading arxiv to s2 mapping from {}".format(
                NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH
            )
        )
        with open(NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH) as _json_file:
            arxiv_to_s2_mapping = json.load(_json_file)
    # Papers with an empty-string mapping have no s2 record; drop them.
    s2_ids = [
        arxiv_to_s2_mapping[arxiv_id]
        for arxiv_id in arxiv_ids
        if arxiv_to_s2_mapping[arxiv_id] != ""
    ]
    # Stage 3: run the (parallel) data collection and dump each index to disk.
    # Existence of the title-nps file is used as the marker that this stage ran.
    if not os.path.exists(NO_REFS_ARXIV_CS_TITLE_NPS_PATH):
        print("Getting data")
        (
            title_inverted_index,
            abstract_inverted_index,
            body_inverted_index,
            normalization_dict,
            s2_id_to_citing_ids,
            s2_id_to_references,
            s2_id_to_canonical,
        ) = s2_utils.full_data_collection_parallel(s2_ids, num_processes)
        print("Dumping title nps to {}".format(NO_REFS_ARXIV_CS_TITLE_NPS_PATH))
        with open(NO_REFS_ARXIV_CS_TITLE_NPS_PATH, "w") as _json_file:
            json.dump(title_inverted_index, _json_file)
        print("Dumping abstract nps to {}".format(NO_REFS_ARXIV_CS_ABSTRACT_NPS_PATH))
        with open(NO_REFS_ARXIV_CS_ABSTRACT_NPS_PATH, "w") as _json_file:
            json.dump(abstract_inverted_index, _json_file)
        print("Dumping body nps to {}".format(NO_REFS_ARXIV_CS_BODY_NPS_PATH))
        with open(NO_REFS_ARXIV_CS_BODY_NPS_PATH, "w") as _json_file:
            json.dump(body_inverted_index, _json_file)
        print("Dumping normalization to {}".format(NO_REFS_ARXIV_CS_NORMALIZATION_PATH))
        with open(NO_REFS_ARXIV_CS_NORMALIZATION_PATH, "w") as _json_file:
            json.dump(normalization_dict, _json_file)
        print("Dumping citing ids to {}".format(NO_REFS_ARXIV_CS_CITING_IDS_PATH))
        with open(NO_REFS_ARXIV_CS_CITING_IDS_PATH, "w") as _json_file:
            json.dump(s2_id_to_citing_ids, _json_file)
        print("Dumping references to {}".format(NO_REFS_ARXIV_CS_REFERENCES_PATH))
        with open(NO_REFS_ARXIV_CS_REFERENCES_PATH, "w") as _json_file:
            json.dump(s2_id_to_references, _json_file)
        print(
            "Dumping canonicalization to {}".format(
                NO_REFS_ARXIV_CS_CANONICALIZATION_PATH
            )
        )
        with open(NO_REFS_ARXIV_CS_CANONICALIZATION_PATH, "w") as _json_file:
            json.dump(s2_id_to_canonical, _json_file)
    print("Done.")
if __name__ == "__main__":
    # Command-line entry point: choose the dataset builder and worker count.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--dataset", help="Which dataset to run on")
    arg_parser.add_argument("--num_processes", type=int, help="How many processes to use")
    cli_args = arg_parser.parse_args()
    if cli_args.dataset != "no_refs_arxiv":
        raise Exception(f"Dataset {cli_args.dataset} not supported")
    generate_dataset_no_refs_arxiv_cs(num_processes=cli_args.num_processes)
| 35.731544 | 95 | 0.679189 | from typing import List, Dict
import json
import spacy
import pickle
import multiprocessing
import argparse
from scispacy.abbreviation import AbbreviationDetector
from tqdm import tqdm
from collections import defaultdict
from forecite.consts import *
from forecite import s2_utils
def compute_noun_phrases_worker(input_text: str) -> List:
"""Returns the noun phrases in a string"""
doc = s2_utils.nlp_md(input_text[:1000000])
return [span.text.lower() for span in doc.noun_chunks]
def get_date_key_from_arxiv_id(arxiv_id: str):
"""
Return a date key of the form <year>_<month> from an arxiv id
"""
if "/" in arxiv_id:
arxiv_id = arxiv_id.split("/")[1]
short_year = arxiv_id[0:2]
month = arxiv_id[2:4]
if (
short_year.startswith("0")
or short_year.startswith("1")
or short_year.startswith("2")
):
year = "20" + short_year
else:
year = "19" + short_year
return year + "_" + month
def generate_dataset_no_refs_arxiv_cs(num_processes: int = 1):
"""
Function to generate the full topic extraction dataset for arxiv cs with references clipped
"""
if not os.path.exists(NO_REFS_ARXIV_CS_DATA_ROOT):
print("Creating directory at {}".format(NO_REFS_ARXIV_CS_DATA_ROOT))
os.mkdir(NO_REFS_ARXIV_CS_DATA_ROOT)
if not os.path.exists(NO_REFS_ARXIV_CS_IDS_PATH):
print("Querying for all arxiv cs ids...")
arxiv_ids = s2_utils.get_all_arxiv_cs_ids()
print("Writing arxiv cs ids to {}".format(NO_REFS_ARXIV_CS_IDS_PATH))
with open(NO_REFS_ARXIV_CS_IDS_PATH, "w") as _arxiv_ids_json_file:
json.dump(list(arxiv_ids), _arxiv_ids_json_file)
else:
print("Loading arxiv cs ids from {}".format(NO_REFS_ARXIV_CS_IDS_PATH))
with open(NO_REFS_ARXIV_CS_IDS_PATH) as _arxiv_ids_json_file:
arxiv_ids = json.load(_arxiv_ids_json_file)
if not os.path.exists(NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH):
print("Generating arxiv to s2 mapping...")
arxiv_to_s2_mapping = s2_utils.get_arxiv_to_s2_id_mapping(arxiv_ids, [])
print(
"Writing arxiv to s2 mapping to {}".format(
NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH
)
)
with open(
NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH, "w"
) as _arxiv_to_s2_mapping_json_file:
json.dump(arxiv_to_s2_mapping, _arxiv_to_s2_mapping_json_file)
else:
print(
"Loading arxiv to s2 mapping from {}".format(
NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH
)
)
with open(NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH) as _json_file:
arxiv_to_s2_mapping = json.load(_json_file)
s2_ids = [
arxiv_to_s2_mapping[arxiv_id]
for arxiv_id in arxiv_ids
if arxiv_to_s2_mapping[arxiv_id] != ""
]
if not os.path.exists(NO_REFS_ARXIV_CS_TITLE_NPS_PATH):
print("Getting data")
(
title_inverted_index,
abstract_inverted_index,
body_inverted_index,
normalization_dict,
s2_id_to_citing_ids,
s2_id_to_references,
s2_id_to_canonical,
) = s2_utils.full_data_collection_parallel(s2_ids, num_processes)
print("Dumping title nps to {}".format(NO_REFS_ARXIV_CS_TITLE_NPS_PATH))
with open(NO_REFS_ARXIV_CS_TITLE_NPS_PATH, "w") as _json_file:
json.dump(title_inverted_index, _json_file)
print("Dumping abstract nps to {}".format(NO_REFS_ARXIV_CS_ABSTRACT_NPS_PATH))
with open(NO_REFS_ARXIV_CS_ABSTRACT_NPS_PATH, "w") as _json_file:
json.dump(abstract_inverted_index, _json_file)
print("Dumping body nps to {}".format(NO_REFS_ARXIV_CS_BODY_NPS_PATH))
with open(NO_REFS_ARXIV_CS_BODY_NPS_PATH, "w") as _json_file:
json.dump(body_inverted_index, _json_file)
print("Dumping normalization to {}".format(NO_REFS_ARXIV_CS_NORMALIZATION_PATH))
with open(NO_REFS_ARXIV_CS_NORMALIZATION_PATH, "w") as _json_file:
json.dump(normalization_dict, _json_file)
print("Dumping citing ids to {}".format(NO_REFS_ARXIV_CS_CITING_IDS_PATH))
with open(NO_REFS_ARXIV_CS_CITING_IDS_PATH, "w") as _json_file:
json.dump(s2_id_to_citing_ids, _json_file)
print("Dumping references to {}".format(NO_REFS_ARXIV_CS_REFERENCES_PATH))
with open(NO_REFS_ARXIV_CS_REFERENCES_PATH, "w") as _json_file:
json.dump(s2_id_to_references, _json_file)
print(
"Dumping canonicalization to {}".format(
NO_REFS_ARXIV_CS_CANONICALIZATION_PATH
)
)
with open(NO_REFS_ARXIV_CS_CANONICALIZATION_PATH, "w") as _json_file:
json.dump(s2_id_to_canonical, _json_file)
print("Done.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", help="Which dataset to run on")
parser.add_argument("--num_processes", type=int, help="How many processes to use")
args = parser.parse_args()
if args.dataset == "no_refs_arxiv":
generate_dataset_no_refs_arxiv_cs(num_processes=args.num_processes)
else:
raise Exception(f"Dataset {args.dataset} not supported")
| 0 | 0 | 0 |
891d783cb0672d3fd69134836f2f60e7655b06fc | 1,946 | py | Python | accounts/admin.py | yusufom/marlymart | 06088af43e6f78b7385c1cf7ea5b4b68337360d8 | [
"Unlicense"
] | null | null | null | accounts/admin.py | yusufom/marlymart | 06088af43e6f78b7385c1cf7ea5b4b68337360d8 | [
"Unlicense"
] | null | null | null | accounts/admin.py | yusufom/marlymart | 06088af43e6f78b7385c1cf7ea5b4b68337360d8 | [
"Unlicense"
] | null | null | null | from django.contrib import admin
from mptt.admin import DraggableMPTTAdmin
from .models import Customer, Product, Category, Order
# Register your models here.
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Product, Productdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Order, OrderAdmin)
| 36.037037 | 95 | 0.666495 | from django.contrib import admin
from mptt.admin import DraggableMPTTAdmin
from .models import Customer, Product, Category, Order
# Register your models here.
class CategoryAdmin(DraggableMPTTAdmin):
    """Tree-aware admin for Category with per-node and cumulative product counts."""
    mptt_indent_field = "name"
    list_display = ('tree_actions', 'indented_title',
                    'related_products_count', 'related_products_cumulative_count')
    list_display_links = ('indented_title',)
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}
    def get_queryset(self, request):
        """Annotate the queryset with product counts so list_display columns are cheap."""
        qs = super().get_queryset(request)
        # Add cumulative product count (this category plus all descendants)
        qs = Category.objects.add_related_count(
            qs,
            Product,
            'category',
            'products_cumulative_count',
            cumulative=True)
        # Add non cumulative product count (this category only)
        qs = Category.objects.add_related_count(qs,
                                                Product,
                                                'category',
                                                'products_count',
                                                cumulative=False)
        return qs
    def related_products_count(self, instance):
        # Reads the annotation added in get_queryset above.
        return instance.products_count
    related_products_count.short_description = 'Related products (for this specific category)'
    def related_products_cumulative_count(self, instance):
        # Reads the cumulative annotation added in get_queryset above.
        return instance.products_cumulative_count
    related_products_cumulative_count.short_description = 'Related products (in tree)'
class Productdmin(admin.ModelAdmin):
    """Admin list view for Product. NOTE(review): class name is missing the 'A' of 'Admin'."""
    list_display = ['title','price','product_pic']
class CustomerAdmin(admin.ModelAdmin):
    """Admin list view for Customer."""
    list_display = ['business_name','date_created','profile_pic',]
class OrderAdmin(admin.ModelAdmin):
    """Admin list view for Order."""
    list_display = ['name_of_customer', 'product','ordered_on', 'Updated_at']
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Product, Productdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Order, OrderAdmin)
| 696 | 804 | 100 |
575e8a28a9cb731a41c4f08831d42dad11d4517a | 2,374 | py | Python | main.py | ValerioDISano/CarND-Advanced-Lane-Lines | 6dcad9e3a89fff575a9be4365ef77dd402994668 | [
"MIT"
] | null | null | null | main.py | ValerioDISano/CarND-Advanced-Lane-Lines | 6dcad9e3a89fff575a9be4365ef77dd402994668 | [
"MIT"
] | null | null | null | main.py | ValerioDISano/CarND-Advanced-Lane-Lines | 6dcad9e3a89fff575a9be4365ef77dd402994668 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from utilities import ImagesLoader
from utilities import VideoLoader
from utilities import Visualizer
from Line import ImageProcessor
import argparse
import sys
if __name__ == "__main__":
main()
| 35.432836 | 114 | 0.672283 | #!/usr/bin/env python3
from utilities import ImagesLoader
from utilities import VideoLoader
from utilities import Visualizer
from Line import ImageProcessor
import argparse
import sys
def main():
    """Run the lane-detection pipeline over a directory of images or a video.

    Command-line arguments:
        -d/--directory: directory containing the input data files.
        -t/--file_type: "image" or "video".

    Exits with status 2 if --file_type is not recognized. Writes the result
    to "video_out.mp4" (video) or the "output_images" directory (images).
    """
    ap = argparse.ArgumentParser(description="Lane detection")
    ap.add_argument("-d", "--directory", required=True, type=str, help="Data files directory")
    ap.add_argument("-t", "--file_type", required=True, type=str, help="Choose between videos or images as input")
    args = vars(ap.parse_args())
    test_path = args["directory"]
    file_type = args["file_type"]
    print("Selected directory: {}".format(test_path))
    print("Selected file type: {}".format(file_type))
    # Validate the input type. Previously an unrecognized type only printed an
    # error (and `print(ap.print_help())` emitted a stray "None") before
    # silently falling through to the image branch; now we stop.
    if file_type not in ("image", "video"):
        sys.stderr.write("Error!!! Unrecognized option -t/--file_type option.")
        ap.print_help()
        sys.exit(2)
    # filter_data selects only files with a certain extension or name.
    filter_data = (lambda path : path.endswith('project_video.mp4'))\
        if file_type == "video"\
        else (lambda path : path.endswith('.jpg'))
    # Loader manages access to the files to be processed.
    loader = ImagesLoader(test_path)\
        if file_type == "image"\
        else VideoLoader(test_path, predicate=filter_data)
    # ImageProcessor applies the whole lane-detection pipeline to a frame.
    if file_type == "video":
        # A single processor keeps state across consecutive video frames.
        processor = ImageProcessor()
        frame_processor = lambda frame : processor[frame]
    else:
        # Independent still images must not reuse state from a previous image,
        # so build a fresh processor per frame.
        def frame_processor(frame):
            processor = ImageProcessor()
            return processor[frame]
    # Apply the pipeline and save the result.
    loader.processData(frame_processor)
    loader.writeOutput("video_out.mp4")\
        if file_type == "video"\
        else loader.writeOutput("output_images")
    return
if __name__ == "__main__":
main()
| 2,125 | 0 | 23 |
2864318e77a41277bea38fe038b27d9bacdd2e8a | 136 | py | Python | programme.py | RekingAV/pingscriptpython | 5e5ba0d661b11ce90b75a52471c726f3836a89d7 | [
"Apache-2.0"
] | null | null | null | programme.py | RekingAV/pingscriptpython | 5e5ba0d661b11ce90b75a52471c726f3836a89d7 | [
"Apache-2.0"
] | null | null | null | programme.py | RekingAV/pingscriptpython | 5e5ba0d661b11ce90b75a52471c726f3836a89d7 | [
"Apache-2.0"
] | null | null | null | if ping('52.113.194.132', verbose = True):
print('ping occuring was succesful')
else:
print('website is vulnerable to be hacked')
| 27.2 | 45 | 0.698529 | if ping('52.113.194.132', verbose = True):
print('ping occuring was succesful')
else:
print('website is vulnerable to be hacked')
| 0 | 0 | 0 |
64e631883c9f3502b9d46971270187e75be9997d | 2,442 | py | Python | qtutils/measurements/virtual_instrument.py | albe-jj/qtutils | c515baa6a4ef61de70b883d8bc9feda5c4d259b3 | [
"BSD-2-Clause"
] | null | null | null | qtutils/measurements/virtual_instrument.py | albe-jj/qtutils | c515baa6a4ef61de70b883d8bc9feda5c4d259b3 | [
"BSD-2-Clause"
] | null | null | null | qtutils/measurements/virtual_instrument.py | albe-jj/qtutils | c515baa6a4ef61de70b883d8bc9feda5c4d259b3 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 15:08:51 2020
@author: Albo
"""
from qcodes import Instrument
from qcodes.utils.validators import Numbers
from functools import reduce
class VirtualInstrument(Instrument):
"""
Implements a device as a virtual instrument for QCoDeS
"""
def __init__(self, name, parameter_map):
"""Create a virtual instrument to represent the device params
Args:
name (str): name of the virtual instrument
parameter_map (dict): dictionary mapping the hallbars measurable parameters to their
measurement instruments
"""
super().__init__(name=name)
self._map_parameters(parameter_map)
def _map_parameters(self, parameter_map):
"""Add all parameters that need to be mapped into a single virtual instrument"""
for parameter_name in parameter_map:
info = parameter_map[parameter_name].copy()
external_instrument = Instrument.find_instrument(info.pop('instrument'))
# external_parameter = external_instrument[info.pop('parameter')]
param_seq = info.pop('parameter').split('.')
external_parameter = reduce(getattr, param_seq, external_instrument)
unit = info.pop('unit', external_parameter.unit)
if external_parameter.gettable:
get_cmd = external_parameter.get
else: get_cmd = False
if external_parameter.settable:
set_cmd = external_parameter.set
else: set_cmd = False
self.add_parameter(name=parameter_name,
get_cmd=get_cmd,
set_cmd=set_cmd, #external_parameter.__call__,
unit=unit,
vals=Numbers(),
**info)
def ask_raw(self, cmd: str) -> None:
"""Dummy method to satisfy base class overrides"""
raise NotImplementedError(
f'Instrument {type(self).__name__} is virtual and requires no ask_raw method'
)
def write_raw(self, cmd: str) -> None:
"""Dummy method to satisfy base class overrides"""
raise NotImplementedError(
f'Instrument {type(self).__name__} is virtual and requires no write_raw method'
)
| 35.911765 | 96 | 0.59869 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 15:08:51 2020
@author: Albo
"""
from qcodes import Instrument
from qcodes.utils.validators import Numbers
from functools import reduce
class VirtualInstrument(Instrument):
    """
    Implements a device as a virtual instrument for QCoDeS

    Each entry of the parameter map is re-exposed on this instrument,
    delegating get/set to a parameter that lives on another (already
    instantiated) QCoDeS instrument.
    """
    def __init__(self, name, parameter_map):
        """Create a virtual instrument to represent the device params
        Args:
            name (str): name of the virtual instrument
            parameter_map (dict): dictionary mapping the hallbars measurable parameters to their
            measurement instruments. Each value is a dict with at least the keys
            'instrument' and 'parameter' (a possibly dotted attribute path);
            'unit' is optional and any remaining keys are forwarded to
            add_parameter.
        """
        super().__init__(name=name)
        self._map_parameters(parameter_map)
    def _map_parameters(self, parameter_map):
        """Add all parameters that need to be mapped into a single virtual instrument"""
        for parameter_name in parameter_map:
            # Copy so the pops below do not mutate the caller's mapping.
            info = parameter_map[parameter_name].copy()
            # Look up the backing instrument by name in the QCoDeS registry.
            external_instrument = Instrument.find_instrument(info.pop('instrument'))
            # external_parameter = external_instrument[info.pop('parameter')]
            # Resolve dotted paths (e.g. "submodule.param") attribute by attribute.
            param_seq = info.pop('parameter').split('.')
            external_parameter = reduce(getattr, param_seq, external_instrument)
            # Fall back to the backing parameter's own unit when none is given.
            unit = info.pop('unit', external_parameter.unit)
            # Delegate get/set only where the backing parameter supports them;
            # False presumably disables that direction in add_parameter -- confirm
            # against the QCoDeS API.
            if external_parameter.gettable:
                get_cmd = external_parameter.get
            else: get_cmd = False
            if external_parameter.settable:
                set_cmd = external_parameter.set
            else: set_cmd = False
            self.add_parameter(name=parameter_name,
                               get_cmd=get_cmd,
                               set_cmd=set_cmd, #external_parameter.__call__,
                               unit=unit,
                               vals=Numbers(),
                               **info)
    def ask_raw(self, cmd: str) -> None:
        """Dummy method to satisfy base class overrides"""
        raise NotImplementedError(
            f'Instrument {type(self).__name__} is virtual and requires no ask_raw method'
        )
    def write_raw(self, cmd: str) -> None:
        """Dummy method to satisfy base class overrides"""
        raise NotImplementedError(
            f'Instrument {type(self).__name__} is virtual and requires no write_raw method'
        )
    def close(self):
        # No hardware to release; delegate to the base-class bookkeeping.
        super().close()
| 19 | 0 | 35 |
bcfbd567ed08786b87b9a99550d434769f5476b2 | 22,876 | py | Python | Meta-heuristic project/Algorithm Codes/Project 3 - GA- V01.py | nusstu-dz/IE5600-Applied-Programming-for-Industrial-Systems | 2289d9a63f49d8d730a671de1b491acdbdca9650 | [
"MIT"
] | null | null | null | Meta-heuristic project/Algorithm Codes/Project 3 - GA- V01.py | nusstu-dz/IE5600-Applied-Programming-for-Industrial-Systems | 2289d9a63f49d8d730a671de1b491acdbdca9650 | [
"MIT"
] | null | null | null | Meta-heuristic project/Algorithm Codes/Project 3 - GA- V01.py | nusstu-dz/IE5600-Applied-Programming-for-Industrial-Systems | 2289d9a63f49d8d730a671de1b491acdbdca9650 | [
"MIT"
] | null | null | null | import copy
import os
import math
import numpy as np
import random
'''inputs'''
os.chdir('input/')
questionInput = open('Prob-30A-50.txt', 'r')
questionInput = questionInput.readlines()
noOfVehicles = int(questionInput[0])
maxCapacity = int(questionInput[1])
Tmax = int(questionInput[2])
depot = questionInput[5].replace(',', '.').split()
depot = Node(int(depot[0]), float(depot[1]), float(depot[2]), 0, 0, Tmax)
pickupNodes = {}
requests=0
for i in range(9, 999):
# additional logic to detect end of pick up nodes
if len(questionInput[i]) < 3:
break
else:
node = questionInput[i].replace(',', '.').split()
if node==[]:
break
pickupNodes[int(node[0])] = Node(int(node[0]), float(node[1]), float(node[2]), int(node[4]), float(node[6]),
float(node[7]))
# count number of requests
requests += 1
deliveryNodes = {}
for i in range(9+requests+3, 9+requests+3+requests):
node = questionInput[i].replace(',', '.').split()
deliveryNodes[int(node[0])] = Node(int(node[0]), float(node[1]), float(node[2]), int(node[4]), float(node[6]),
float(node[7]))
deliveryNodes[int(node[0])].profit = 80 #each node's profit upon delivery
allNodes = {0: depot, **pickupNodes, **deliveryNodes}
# build the pickup delivery matching dict
pickupDeliveryPair = {}
iter = 1
for item in deliveryNodes:
pickupDeliveryPair[iter] = deliveryNodes[item].idx
iter += 1
speedMat = []
# blockcount = 9+requests+3+requests+2 brings you to the first speed pattern in input file
blockcount = 9+requests+3+requests+2
for i in range(5):
speed = []
for j in range(i * 6 + blockcount, i * 6 + (blockcount+4)):
time = questionInput[j].replace(',', '.').split()
speed.append([float(time[0]), float(time[1]), float(time[3])])
speedMat.append(speed)
speedChooseMat = []
# use blockcocunt to read the speed choose matrix
for i in range(blockcount+31, blockcount+31+2*requests+1):
speedChooseMat.append([int(i) for i in questionInput[i].replace(',', '.').split()])
''' processing input '''
# calculate distance matrix
# total lines required = 2*requests + 2 (but we minus 1 because range starts from 0)
distMat = [[0.0] * (2*requests + 2-1) for i in range(2*requests + 2-1)]
for i in range((2*requests + 2-1)):
for j in range(i + 1, (2*requests + 2-1)):
dist = getDist(i, j)
distMat[i][j] = dist
distMat[j][i] = dist
''' generate BFS '''
currOptimalSolution = RandomBFS()
''' how to use checking function '''
# currOptimalSolution = [
# [0,1,11,3,13,0],
# [0,10,20,0],
# [0,5,15,6,16,0]] #just an example
# #iterate based on this
# print(currOptimalSolution)
s0=[]
for vehicle in currOptimalSolution:
oneroute=[vehicle, checkFeasible(vehicle)]
s0.append(oneroute)
nn= len(pickupNodes)
profit0 = Profit_Solution(s0)
print('******* inital solution')
print(profit0)
print(*s0, sep = "\n")
# print(*s0)
print('******** inital solution')
# print( 'score:' + str(checkFeasible(vehicle)) + ' route:' + str(vehicle) )
# s0 = [[0, 14, 29, 11, 26, 1, 16, 3, 18, 0], 138.45952332463185], [[0, 10, 25, 0], 18.873851271071658], [
# [0, 8, 23, 6, 21, 13, 28, 5, 20, 15, 30, 4, 19, 2, 17, 0], 366.359325517866], [[0, 7, 22, 0], 29.4378156146646], [
# [0, 0], False]
# nn=15
# s1= NEIGHBOR_VRP(s0,nn)
# print(s1)
#GA Part
pop= get_population(s0,nn)
retain_rate=0.3
random_select_rate=0.5
mutation_rate=0.4
#
# selected = selection(pop)
# # selected=rank_route(selected)
# grandpare = remove_profit(selected)
#
# crossed = crossover(grandpare)
# children = profit_children(crossed)
# children=mutation(children)
#
# pop= selected+children
# # pop = get_population(newpop,nn)
# selected = selection(pop)
maxprofit=0
register = []
i = 0
itter_time=100
while i < itter_time:
# 选择繁殖个体群
selected = selection(pop)
# selected=rank_route(selected)
grandpare = remove_profit(selected)
# 交叉繁殖
crossed = crossover(grandpare)
children= profit_children(crossed)
# 变异操作
children = mutation(children)
# 更新种群
pop = selected + children
[bestroute, bestprofit] = find_max(pop)
if maxprofit < bestprofit:
maxprofit = bestprofit
# print('New best profit is ' + str(maxprofit))
# print('New best route is ' + str(bestroute))
i=i+1
print('Current population is :')
print(*pop,sep='\n')
print('---------------------------------------------------------------------')
# print('The best profit is ' + str(maxprofit))
# print('The best route is ' + str(bestroute))
print(maxprofit)
# def decoding2(corssed):
# decoded=[]
# for routes in crossed:
# start = routes.index(0)
# if routes[start + 1] == 0:
# start += 1
#
# newroutes = routes[start:] + routes[:start]
# routes = newroutes
# routes.append(0)
# # print(routes)
# l = []
# for w in routes:
# if start: start = l.append([]) # l.append() returns None, that is falsey...
# if w != 0:
# l[-1].append(w)
# if w == 0: start = 1
#
# res = []
# for m in l:
# if m != []:
# res.append([0] + m + [0])
# while len(res) < noOfVehicles:
# res.append([0, 0])
#
# # soluion0 = [res, checkFeasible(res)]
# # soluion0.append(oneroute)
# decoded.append(res)
# return decoded
# s2=[]
# for n_ in crossed_res:
# oneroute_=[n_, checkFeasible(n_)]
# s2.append(oneroute_)
# print(allNodes[25].load)
#
| 31.816412 | 121 | 0.552588 | import copy
import os
import math
import numpy as np
import random
class Node(object):
    """A location in the pickup/delivery routing problem.

    Attributes:
        idx: node index (0 is the depot).
        x, y: planar coordinates used for Euclidean distances.
        load: capacity change when servicing the node (read from the input file).
        minTime, maxTime: service time window.
        profit: reward for servicing the node; defaults to 0 and is assigned
            after construction for delivery nodes.
    """

    def __init__(self, idx, x, y, load, minTime, maxTime):
        super(Node, self).__init__()
        self.idx = idx
        self.x = x
        self.y = y
        self.load = load
        self.minTime = minTime
        self.maxTime = maxTime
        self.profit = 0

    def __repr__(self):
        # Debug-friendly representation; nothing else relies on its format.
        return "Node(idx={}, x={}, y={}, load={}, window=[{}, {}], profit={})".format(
            self.idx, self.x, self.y, self.load, self.minTime, self.maxTime, self.profit)
def getDist(location1, location2):
    """Euclidean distance between two node indices, looked up in allNodes."""
    node_a = allNodes[location1]
    node_b = allNodes[location2]
    dx = node_a.x - node_b.x
    dy = node_a.y - node_b.y
    return (dx ** 2 + dy ** 2) ** 0.5
def travelTime(startTime, startNode, endNode):
    """Time to travel from startNode to endNode when departing at startTime.

    Uses the time-dependent speed profile selected by speedChooseMat for this
    arc: each profile row is [window_start, window_end, speed]. Returns the
    elapsed travel time, or False when the trip cannot be completed (departure
    at/after Tmax, or the distance cannot be covered by the end of the last
    speed window).
    """
    # infeasible: cannot depart at or after the horizon
    if startTime >= Tmax:
        return False
    arcCat = speedChooseMat[startNode][endNode]
    speed = speedMat[arcCat]
    distance = distMat[startNode][endNode]
    # determine start in which timezone (speed window containing startTime)
    # NOTE(review): if startTime falls in no window, `timezone` is left unbound
    # and the loop below raises NameError -- assumes the windows cover [0, Tmax).
    for i in range(len(speed)):
        if startTime >= speed[i][0] and startTime < speed[i][1]:
            timezone = i
            break
    # calculate time taken, walking forward through the speed windows
    timeTravelled = 0
    for i in range(timezone, len(speed)):
        # farthest we can get before this window closes
        maxDistance = (speed[i][1] - startTime) * speed[i][2]
        # if cannot reach destination in this timezone, check next timezone
        if distance > maxDistance:
            distance -= maxDistance
            timeTravelled += speed[i][1] - startTime
        # can reach in this timezone
        else:
            timeTravelled += distance / speed[i][2]
            distance = 0
            break
        # continue from the start of the next window
        startTime = speed[i][1]
    if distance == 0:
        return (timeTravelled)
    # cannot reach within the last timezone == infeasible
    else:
        return False
def checkFeasible(path, timing=False): #timing=True to return time taken of the route
    """Score a single route, or report it infeasible.

    A route is a list of node indices starting and ending at the depot (0).
    Returns:
        0.0 for the empty route [0, 0];
        False if the route violates capacity, time windows, or yields a
        non-positive objective;
        otherwise the objective value profit - (finish time - latest feasible
        departure from the depot) -- or, when timing=True, the arrival time
        back at the depot instead.
    """
    if path == [0,0]:
        return 0.0
    # check load capacity along the route (running sum of node loads)
    load = 0
    for i in range(len(path) - 1):
        nextNode = path[i + 1]
        load += allNodes[nextNode].load
        if load > maxCapacity:
            return False
    # check time
    startNode = path[0]
    firstNode = path[1]
    #get latest starting time to leave depot (delay departure so the first
    #node is reached exactly at its window opening, when possible)
    if allNodes[firstNode].minTime > travelTime(0,startNode,firstNode):
        startTime = allNodes[firstNode].minTime - travelTime(0,startNode,firstNode)
    else:
        startTime = 0
    #calculate time to reach back to the depot
    time = 0
    profit = 0
    for i in range(len(path) - 1):
        currNode = path[i]
        nextNode = path[i + 1]
        timeTaken = travelTime(time, currNode, nextNode)
        # arrival must be possible and before the node's window closes;
        # waiting until the window opens is allowed (the max below)
        if timeTaken and time < allNodes[nextNode].maxTime:
            time = max(time + timeTaken, allNodes[nextNode].minTime)
        else:
            return False
        profit += allNodes[nextNode].profit
    #pass all checks, calculate profit
    objFunction = profit - time+startTime
    if timing:
        return time
    if objFunction > 0:
        return objFunction
    else:
        return False
def BFS():
    """Greedy construction of an initial feasible solution.

    For each vehicle in turn: seed the route with the pickup/delivery pair
    that can be completed earliest, then keep inserting the pair that most
    improves the route's objective until no insertion is feasible.

    Returns:
        A list of routes (one per vehicle), each of the form [0, ..., 0].
    """
    # One empty depot-to-depot route per vehicle.
    basicSolution = [[0,0] for _ in range(noOfVehicles)]
    remainingPickups = copy.deepcopy(pickupNodes)
    currOptimalSolution = []
    for vehicle in basicSolution:
        if remainingPickups != {}:
            #initialse the next best pickup
            bestTiming = float('inf')
            pickupFlag = False
            #insert earliest pickup option to allow for more pickups later
            for item in remainingPickups:
                testPath = copy.deepcopy(vehicle)
                testPath.insert(-1, item)
                testPath.insert(-1, pickupDeliveryPair[item])
                timing = checkFeasible(testPath, timing=True)
                score = checkFeasible(testPath)
                if timing and score and timing < bestTiming:
                    bestTiming = timing
                    bestPath = testPath
                    pickupFlag = True
            if pickupFlag: #check if there is a initial feasible pickup
                vehicle = bestPath
                remainingPickups.pop(vehicle[-3]) #remove last route taken
                #insert the rest based on objectiveFunction
                bestScore = checkFeasible(vehicle)
                availablePaths = True
                while availablePaths:
                    availablePaths = False
                    for item in remainingPickups:
                        testPath = copy.deepcopy(vehicle)
                        testPath.insert(-1, item) #insert pickup node
                        testPath.insert(-1, pickupDeliveryPair[item]) #insert delivery node
                        score = checkFeasible(testPath)
                        if score and score > bestScore:
                            availablePaths = True
                            bestScore = score
                            bestPath = testPath
                    if availablePaths:
                        vehicle = bestPath
                        remainingPickups.pop(vehicle[-3]) #remove last route taken
        currOptimalSolution.append(vehicle)
    return currOptimalSolution
def RandomBFS():
    """Build a randomized initial feasible solution.

    Repeatedly picks a random vehicle and greedily inserts the remaining
    pickup/delivery pair whose route finishes earliest, until no pickups
    remain or the attempt budget (10000 feasibility checks) is exhausted.

    Returns:
        A list of routes (one per vehicle), each of the form [0, ..., 0].
    """
    basicSolution = [[0, 0] for _ in range(noOfVehicles)]
    remainingPickups = copy.deepcopy(pickupNodes)
    counter = 0
    while remainingPickups != {}:
        # Track the insertion whose route reaches the depot earliest.
        bestTiming = float('inf')
        pickupFlag = False
        # Choose a vehicle at random to diversify starting solutions.
        car = np.random.randint(len(basicSolution))
        vehicle = basicSolution[car]
        for item in remainingPickups:
            testPath = copy.deepcopy(vehicle)
            testPath.insert(-1, item)
            testPath.insert(-1, pickupDeliveryPair[item])
            timing = checkFeasible(testPath, timing=True)
            score = checkFeasible(testPath)
            counter += 1
            if timing and score and timing < bestTiming:
                bestTiming = timing
                bestPath = testPath
                pickupFlag = True
        if pickupFlag:  # a feasible insertion exists for this vehicle
            vehicle = bestPath
            remainingPickups.pop(vehicle[-3])  # pickup node sits third from the end
            basicSolution[car] = vehicle
        if counter > 10000:
            # Give up: some pickups may be impossible to place feasibly.
            break
    # Fix: a stray `currOptimalSolution.append(vehicle)` used to sit here; it
    # was never returned and raised NameError whenever the loop body never ran
    # (i.e. there were no pickups). Removed.
    return basicSolution
'''inputs'''
os.chdir('input/')
questionInput = open('Prob-30A-50.txt', 'r')
questionInput = questionInput.readlines()
noOfVehicles = int(questionInput[0])
maxCapacity = int(questionInput[1])
Tmax = int(questionInput[2])
depot = questionInput[5].replace(',', '.').split()
depot = Node(int(depot[0]), float(depot[1]), float(depot[2]), 0, 0, Tmax)
pickupNodes = {}
requests=0
for i in range(9, 999):
# additional logic to detect end of pick up nodes
if len(questionInput[i]) < 3:
break
else:
node = questionInput[i].replace(',', '.').split()
if node==[]:
break
pickupNodes[int(node[0])] = Node(int(node[0]), float(node[1]), float(node[2]), int(node[4]), float(node[6]),
float(node[7]))
# count number of requests
requests += 1
deliveryNodes = {}
for i in range(9+requests+3, 9+requests+3+requests):
node = questionInput[i].replace(',', '.').split()
deliveryNodes[int(node[0])] = Node(int(node[0]), float(node[1]), float(node[2]), int(node[4]), float(node[6]),
float(node[7]))
deliveryNodes[int(node[0])].profit = 80 #each node's profit upon delivery
allNodes = {0: depot, **pickupNodes, **deliveryNodes}
# build the pickup delivery matching dict
pickupDeliveryPair = {}
iter = 1
for item in deliveryNodes:
pickupDeliveryPair[iter] = deliveryNodes[item].idx
iter += 1
speedMat = []
# blockcount = 9+requests+3+requests+2 brings you to the first speed pattern in input file
blockcount = 9+requests+3+requests+2
for i in range(5):
speed = []
for j in range(i * 6 + blockcount, i * 6 + (blockcount+4)):
time = questionInput[j].replace(',', '.').split()
speed.append([float(time[0]), float(time[1]), float(time[3])])
speedMat.append(speed)
speedChooseMat = []
# use blockcocunt to read the speed choose matrix
for i in range(blockcount+31, blockcount+31+2*requests+1):
speedChooseMat.append([int(i) for i in questionInput[i].replace(',', '.').split()])
''' processing input '''
# calculate distance matrix
# total lines required = 2*requests + 2 (but we minus 1 because range starts from 0)
distMat = [[0.0] * (2*requests + 2-1) for i in range(2*requests + 2-1)]
for i in range((2*requests + 2-1)):
for j in range(i + 1, (2*requests + 2-1)):
dist = getDist(i, j)
distMat[i][j] = dist
distMat[j][i] = dist
def Profit_Solution(s1):
    """Re-score every route of solution *s1* in place and return the total.

    Each entry of s1 is [route, score]; the score is refreshed via
    checkFeasible. If any route is infeasible, the whole solution scores 0.
    """
    total = 0
    for entry in s1:
        score = checkFeasible(entry[0])
        entry[1] = score
        if score is False:
            # One infeasible route invalidates the entire solution.
            total = 0
            break
        if score >= 0:
            total += score
    return total
''' generate BFS '''
currOptimalSolution = RandomBFS()
''' how to use checking function '''
# currOptimalSolution = [
# [0,1,11,3,13,0],
# [0,10,20,0],
# [0,5,15,6,16,0]] #just an example
# #iterate based on this
# print(currOptimalSolution)
s0=[]
for vehicle in currOptimalSolution:
oneroute=[vehicle, checkFeasible(vehicle)]
s0.append(oneroute)
nn= len(pickupNodes)
profit0 = Profit_Solution(s0)
print('******* inital solution')
print(profit0)
print(*s0, sep = "\n")
# print(*s0)
print('******** inital solution')
# print( 'score:' + str(checkFeasible(vehicle)) + ' route:' + str(vehicle) )
def Remove_node(s, tour, node_pos, data=None):
    """Delete the node at position *node_pos* from route *tour* of solution *s*.

    `data` is unused; it is kept only for interface compatibility.
    """
    s[tour][0].pop(node_pos)
def Insert_node(s, node, tour, point):
    """Insert *node* into route *tour* of solution *s* before index *point*, in place."""
    target_route = s[tour][0]
    target_route.insert(point, node)
# s0 = [[0, 14, 29, 11, 26, 1, 16, 3, 18, 0], 138.45952332463185], [[0, 10, 25, 0], 18.873851271071658], [
# [0, 8, 23, 6, 21, 13, 28, 5, 20, 15, 30, 4, 19, 2, 17, 0], 366.359325517866], [[0, 7, 22, 0], 29.4378156146646], [
# [0, 0], False]
def NEIGHBOR_VRP(s, halfnumberofnode, data=None):
    """Return a random neighbour of solution *s* by relocating one pickup/delivery pair.

    A node is drawn from a random tour and reinserted, together with its paired
    pickup or delivery partner, either within the same tour or into another
    tour.  *s* is a list of [route, score] pairs; *halfnumberofnode* (n) is the
    number of pickup nodes, so nodes k and k+n form a pair.  *data* is unused
    and kept for interface compatibility.  Operates on a deep copy; *s* itself
    is never modified.

    NOTE(review): when tour_a == tour_b but the tour holds fewer than two
    pairs, neither branch fires and the function implicitly returns None;
    callers such as mutation() do not guard against that — confirm intended.
    """
    # 1. randomly pick a source tour that has something to move
    n = halfnumberofnode
    tour_a = np.random.randint(len(s))
    while len(s[tour_a][0]) <= 4:
        # routes shorter than [depot, pickup, delivery, depot] are skipped
        tour_a = np.random.randint(len(s))
    pos_a = np.random.randint(len(s[tour_a][0]) - 2) + 1  # random int from [1, len-2]; index len-1 is the closing depot
    # 2. randomly pick a destination tour
    tour_b = np.random.randint(len(s))
    if tour_a == tour_b and len(s[tour_a][0]) >= 6:  # same-tour move: needs at least two pairs
        point_b = pos_a
        while point_b == pos_a:
            point_b = np.random.randint(len(s[tour_a][0]) - 1) + 1
        # remove node_a from a deep copy so the caller's solution is untouched
        sNew = copy.deepcopy(s)
        temp_node = sNew[tour_a][0][pos_a]
        # 2.1 the chosen node is a delivery node (temp_node > n);
        #     its pickup partner is temp_node - n
        if sNew[tour_a][0][pos_a] > n:
            pickup_node = sNew[tour_a][0][pos_a] - n
            pos_c = sNew[tour_a][0].index(pickup_node)
            sNew[tour_a][0].remove(sNew[tour_a][0][pos_a])
            sNew[tour_a][0].remove(pickup_node)
            # re-draw insertion points against the shrunken route; point_c is
            # forced <= point_b so the pickup lands before the delivery
            # (tour_b == tour_a in this branch, so the mixed indices agree)
            point_b = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            while point_c > point_b:
                point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, temp_node, tour_a, point_b)
            Insert_node(sNew, pickup_node, tour_a, point_c)
            return sNew
        # 2.2 the chosen node is a pickup node (temp_node <= n);
        #     its delivery partner is temp_node + n
        elif sNew[tour_a][0][pos_a] <= n:
            delivery_node = sNew[tour_a][0][pos_a] + n
            pos_c = sNew[tour_a][0].index(delivery_node)
            sNew[tour_a][0].remove(sNew[tour_a][0][pos_a])
            sNew[tour_a][0].remove(delivery_node)
            # pickup reinserted first, delivery forced strictly after it
            # NOTE(review): possible long retry loop when point_b is already
            # the largest legal index — confirm acceptable
            point_b = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, temp_node, tour_b, point_b)
            point_c = np.random.randint(len(sNew[tour_a][0]) - 1) + 1
            while point_c <= point_b:
                point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, delivery_node, tour_a, point_c)
            return sNew
    # 3. different tours: move the whole pair from tour_a into tour_b
    elif tour_a != tour_b:  # and len(s[tour_a][0]) >=6 : # at least two pairs
        point_b = pos_a
        while point_b == pos_a:
            point_b = np.random.randint(len(s[tour_a][0]) - 1) + 1
        # remove node_a from a deep copy so the caller's solution is untouched
        sNew = copy.deepcopy(s)
        temp_node = sNew[tour_a][0][pos_a]
        # 3.1 the chosen node is a delivery node (temp_node > n)
        if sNew[tour_a][0][pos_a] > n:
            pickup_node = sNew[tour_a][0][pos_a] - n
            pos_c = sNew[tour_a][0].index(pickup_node)
            sNew[tour_a][0].remove(sNew[tour_a][0][pos_a])
            sNew[tour_a][0].remove(pickup_node)
            # insertion points drawn against tour_b after the pair was removed
            # from tour_a; point_c <= point_b keeps pickup before delivery
            point_b = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            while point_c > point_b:
                point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, temp_node, tour_b, point_b)
            Insert_node(sNew, pickup_node, tour_b, point_c)
            return sNew
        # 3.2 the chosen node is a pickup node (temp_node <= n)
        elif sNew[tour_a][0][pos_a] <= n:
            delivery_node = sNew[tour_a][0][pos_a] + n
            pos_c = sNew[tour_a][0].index(delivery_node)
            sNew[tour_a][0].remove(sNew[tour_a][0][pos_a])
            sNew[tour_a][0].remove(delivery_node)
            point_b = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, temp_node, tour_b, point_b)
            point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            while point_c <= point_b:
                point_c = np.random.randint(len(sNew[tour_b][0]) - 1) + 1
            Insert_node(sNew, delivery_node, tour_b, point_c)
            return sNew
# nn=15
# s1= NEIGHBOR_VRP(s0,nn)
# print(s1)
def get_population(S0, halfnodes=nn):
    """Spawn 20 random neighbours of solution *S0*, sorted by profit (best first).

    Each neighbour is generated by NEIGHBOR_VRP and scored with
    Profit_Solution; the returned list contains only the solutions, ordered by
    descending profit.
    """
    scored = []
    for _ in range(20):
        neighbour = NEIGHBOR_VRP(S0, halfnodes)
        scored.append((neighbour, Profit_Solution(neighbour)))
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return [pair[0] for pair in scored]
#GA Part
def selection(candidate):
    """Pick the survivors of one GA generation.

    Keeps the top ``retain_rate`` fraction of *candidate* (assumed already
    sorted best-first) as elite parents, then gives every remaining chromosome
    a ``random_select_rate`` chance of surviving anyway.  Both rates are
    module-level globals.
    """
    pool = list(candidate)
    elite_count = int(len(pool) * retain_rate)
    survivors = pool[:elite_count]
    # every non-elite chromosome gets a lottery ticket
    for straggler in pool[elite_count:]:
        if random.random() < random_select_rate:
            survivors.append(straggler)
    return survivors
def rank_route(s):
    """Sort each solution's [route, profit] pairs in place by profit, best first.

    Parameters
    ----------
    s : list
        List of solutions; each solution is a list of ``[route, profit]``
        pairs.

    Returns
    -------
    list
        The same object *s*, with every inner solution now ordered by
        descending profit.
    """
    for solution in s:
        # list.sort() sorts in place and returns None; the original code
        # rebound that None to a throwaway local, which was misleading (and
        # kept an unused `temp` list around).  The in-place sort is all that
        # is needed.
        solution.sort(key=lambda pair: pair[1], reverse=True)
    return s
def remove_profit(s):
    """Strip the profit slot from every [route, profit] pair.

    Returns a new nested list holding only the bare routes, one inner list per
    solution; the route objects themselves are shared, not copied.
    """
    return [[pair[0] for pair in solution] for solution in s]
def Crossover(parents):
    """Order-style (OX) crossover: breed offspring until the population is back to 20.

    Two distinct parents are cut at a random [left, right) window; each child
    receives the other parent's window and fills the remaining positions from
    its own rotated gene sequence, skipping duplicates.  Assumes every parent
    is a permutation of the same node set with equal length.

    NOTE(review): requires len(parents) >= 2, otherwise the while loop never
    terminates; ``.remove(o)`` raises ValueError if a parent lacks a gene from
    the other's window — confirm parents share one node set.  This function
    appears superseded by the lowercase crossover() used in the main loop.
    """
    count=20
    target_count = count - len(parents)
    # list of offspring
    children = []
    while len(children) < target_count:
        male_index = random.randint(0, len(parents) - 1)
        female_index = random.randint(0, len(parents) - 1)
        if male_index != female_index:
            male = parents[male_index]
            female = parents[female_index]
            left = random.randint(0, len(male) - 2)
            right = random.randint(left + 1, len(male) - 1)
            # crossover window taken from each parent
            gene1 = male[left:right]
            gene2 = female[left:right]
            # rotate each parent so filling starts just after the window
            child1_c = male[right:] + male[:right]
            child2_c = female[right:] + female[:right]
            child1 = child1_c.copy()
            child2 = child2_c.copy()
            # drop the genes that the swapped-in window already supplies
            for o in gene2:
                child1_c.remove(o)
            for o in gene1:
                child2_c.remove(o)
            # splice: foreign window in the middle, own leftovers around it
            child1[left:right] = gene2
            child2[left:right] = gene1
            child1[right:] = child1_c[0:len(child1) - right]
            child1[:left] = child1_c[len(child1) - right:]
            child2[right:] = child2_c[0:len(child1) - right]
            child2[:left] = child2_c[len(child1) - right:]
            children.append(child1)
            children.append(child2)
    return children
def crossover(parents):
    """Route-wise merge crossover used by the GA main loop.

    For each vehicle index i, the i-th routes of two distinct random parents
    are concatenated, de-duplicated in order, and stripped of nodes already
    assigned to earlier routes of the child; depot 0 is re-attached at both
    ends of every route.  Breeds until the population is back to 20.

    NOTE(review): requires len(parents) >= 2 or the while loop never
    terminates; a child's routes may have unbalanced pickup/delivery pairs —
    feasibility is re-checked afterwards via profit_children/checkFeasible.
    """
    count=20
    target_count = count - len(parents)
    # list of offspring
    children = []
    while len(children) < target_count:
        male_index = random.randint(0, len(parents) - 1)
        female_index = random.randint(0, len(parents) - 1)
        if male_index != female_index:
            male = parents[male_index]
            female = parents[female_index]
            child=[]
            used=set([0])  # depot 0 is never treated as an assignable node
            for i in range(len(male)):
                # merge the i-th routes of both parents, preserving order
                comb=male[i]+female[i]
                comb= list(dict.fromkeys(comb))  # order-preserving de-dup
                comb= [x for x in comb if x not in used]
                set1= set(comb)
                used.update(set1)
                child.append([0]+comb+[0])
            children.append(child)
    return children
def profit_children(Solution):
    """Wrap each raw route of every solution with its checkFeasible score.

    Converts a list of bare-route solutions (as produced by crossover) back
    into the working ``[[route, score], ...]`` representation.
    """
    res = []
    for solution in Solution:
        scored = [[vehicle, checkFeasible(vehicle)] for vehicle in solution]
        res.append(scored)
    return res
def mutation(children):
    """Mutate each child with probability ``mutation_rate`` (module global).

    A mutated child is replaced by the result of applying NEIGHBOR_VRP a
    random number of times.  The list is updated in place and also returned.

    NOTE(review): ``random.randint(1, len(child) - 4)`` raises ValueError when
    a child has 4 or fewer routes — confirm the vehicle count guarantees more.
    """
    for idx, child in enumerate(children):
        if random.random() < mutation_rate:
            for _ in range(random.randint(1, len(child) - 4)):
                child = NEIGHBOR_VRP(child, nn, data=None)
            children[idx] = child
    return children
def find_max(pop):
    """Return the (solution, profit) pair with the highest profit in *pop*.

    Every solution is re-scored (in place) via Profit_Solution.  Ties keep the
    first maximal solution, matching the original stable-sort behaviour.

    The original bound its working list to the name ``max``, shadowing the
    builtin, and sorted the whole list just to take the head; a single pass
    with the builtin ``max`` does the same job.
    """
    scored = [(s1, Profit_Solution(s1)) for s1 in pop]
    # builtin max returns the first maximal element, same as a stable
    # reverse-sort followed by [0]
    return max(scored, key=lambda pair: pair[1])
# --- GA driver -------------------------------------------------------------
# initial population: 20 random neighbours of the starting solution s0
pop= get_population(s0,nn)
# GA hyper-parameters, read as module globals by selection() and mutation()
retain_rate=0.3        # fraction of the population kept as elite parents
random_select_rate=0.5 # chance for a non-elite chromosome to survive anyway
mutation_rate=0.4      # chance that a child is mutated
maxprofit=0
register = []  # NOTE(review): never appended to below — confirm still needed
i = 0
itter_time=100  # number of GA generations
while i < itter_time:
    # select the breeding pool (elite + lucky stragglers)
    selected = selection(pop)
    grandpare = remove_profit(selected)
    # crossover on the bare routes, then re-score the children
    crossed = crossover(grandpare)
    children= profit_children(crossed)
    # mutation
    children = mutation(children)
    # next generation = survivors + children
    pop = selected + children
    [bestroute, bestprofit] = find_max(pop)
    if maxprofit < bestprofit:
        maxprofit = bestprofit
    i=i+1
    # NOTE(review): this dump runs every generation; move it below the loop
    # if only the final population is wanted
    print('Current population is :')
    print(*pop,sep='\n')
    print('---------------------------------------------------------------------')
print(maxprofit)
# def decoding2(corssed):
# decoded=[]
# for routes in crossed:
# start = routes.index(0)
# if routes[start + 1] == 0:
# start += 1
#
# newroutes = routes[start:] + routes[:start]
# routes = newroutes
# routes.append(0)
# # print(routes)
# l = []
# for w in routes:
# if start: start = l.append([]) # l.append() returns None, that is falsey...
# if w != 0:
# l[-1].append(w)
# if w == 0: start = 1
#
# res = []
# for m in l:
# if m != []:
# res.append([0] + m + [0])
# while len(res) < noOfVehicles:
# res.append([0, 0])
#
# # soluion0 = [res, checkFeasible(res)]
# # soluion0.append(oneroute)
# decoded.append(res)
# return decoded
# s2=[]
# for n_ in crossed_res:
# oneroute_=[n_, checkFeasible(n_)]
# s2.append(oneroute_)
# print(allNodes[25].load)
#
| 16,492 | -2 | 502 |
8ae7507b30f5c4ecb28d1b677c6c124461738cf7 | 6,945 | py | Python | opencood/data_utils/post_processor/ciassd_postprocessor.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | opencood/data_utils/post_processor/ciassd_postprocessor.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | opencood/data_utils/post_processor/ciassd_postprocessor.py | YuanYunshuang/OpenCOOD | 98e07eb45f7fdcd32518b2cf8f9052f73ca80bec | [
"Apache-2.0"
] | null | null | null | """
3D Anchor Generator for Voxel
"""
import math
import sys
import numpy as np
import torch
import torch.nn.functional as F
from opencood.data_utils.post_processor.voxel_postprocessor \
import VoxelPostprocessor
from opencood.utils import box_utils
| 40.852941 | 108 | 0.57077 | """
3D Anchor Generator for Voxel
"""
import math
import sys
import numpy as np
import torch
import torch.nn.functional as F
from opencood.data_utils.post_processor.voxel_postprocessor \
import VoxelPostprocessor
from opencood.utils import box_utils
class CiassdPostprocessor(VoxelPostprocessor):
    """Post-processor for CIA-SSD-style stage-1 detection outputs.

    Specializes VoxelPostprocessor: decodes the stage-1 head (classification,
    box regression, IoU estimate and direction classifier) into 3D boxes,
    rescales scores by the predicted IoU, and applies rotated NMS.  Output
    format depends on the ``train`` flag passed at construction.
    """

    def __init__(self, anchor_params, train):
        """Construct the post-processor.

        Parameters
        ----------
        anchor_params : dict
            Anchor configuration forwarded to VoxelPostprocessor.
        train : bool
            Training-mode flag; post_process returns per-sample lists when
            True and a single NMS-filtered tensor when False.
        """
        super(CiassdPostprocessor, self).__init__(anchor_params, train)
        self.train = train
        # number of anchors per spatial location, from the shared params dict
        self.anchor_num = self.params['anchor_args']['num']

    def post_process(self, data_dict, output_dict):
        """
        Process the outputs of the model to 2D/3D bounding box.

        Step1: convert each cav's output to bounding box format
        Step2: project the bounding boxes to ego space.
        Step:3 NMS

        Parameters
        ----------
        data_dict : dict
            The dictionary containing the origin input data of model.

        output_dict :dict
            The dictionary containing the output of the model.

        Returns
        -------
        pred_box3d_tensor : torch.Tensor
            The prediction bounding box tensor after NMS (inference mode).
            In training mode, a list of per-sample box tensors is returned
            instead, paired with a list of per-sample scores.
        gt_box3d_tensor : torch.Tensor
            The groundtruth bounding box tensor.
        """
        # the final bounding box list
        # NOTE(review): batch_num_box_count is only assigned inside the cav
        # loop below; if data_dict were empty, the training branch would hit a
        # NameError — confirm a module-level default exists or is unnecessary.
        global batch_num_box_count
        pred_box3d_original_list = []
        pred_box3d_list = []
        pred_box2d_list = []

        for cav_id, cav_content in data_dict.items():
            assert cav_id in output_dict
            # the transformation matrix to ego space; identity when absent
            if 'transformation_matrix' in cav_content:
                transformation_matrix = cav_content['transformation_matrix']
            else:
                transformation_matrix = torch.from_numpy(np.identity(4)).float().\
                    to(cav_content['anchor_box'].device)

            # (H, W, anchor_num, 7)
            anchor_box = cav_content['anchor_box']

            # raw stage-1 head outputs for this cav
            preds_dict = output_dict[cav_id]['preds_dict_stage1']
            # heads are emitted channels-first; permute to (B, H, W, C)
            prob = preds_dict['cls_preds']
            prob = torch.sigmoid(prob.permute(0, 2, 3, 1).contiguous())
            reg = preds_dict['box_preds'].permute(0, 2, 3, 1).contiguous()
            iou = preds_dict['iou_preds'].permute(0, 2, 3, 1).contiguous().reshape(1, -1)
            # NOTE(review): `dir` shadows the builtin of the same name inside
            # this method; holds the 2-way direction classifier logits
            dir = preds_dict['dir_cls_preds'].permute(0, 2, 3, 1).contiguous().reshape(1, -1, 2)

            # convert regression map back to bounding box
            # (N, W*L*anchor_num, 7)
            batch_box3d = self.delta_to_boxes3d(reg, anchor_box, False)
            mask = torch.gt(prob, self.params['target_args']['score_threshold'])
            # per-sample surviving-box counts, consumed by the training branch
            batch_num_box_count = [int(m.sum()) for m in mask]
            mask = mask.view(1, -1)
            mask_reg = mask.unsqueeze(2).repeat(1, 1, 7)

            # during validation/testing, the batch size should be 1
            if not self.train:
                assert batch_box3d.shape[0] == 1

            boxes3d = torch.masked_select(batch_box3d.view(-1, 7), mask_reg[0]).view(-1, 7)
            scores = torch.masked_select(prob.view(-1), mask[0])
            dir_labels = torch.max(dir, dim=-1)[1]
            dir_labels = dir_labels[mask]
            if scores.shape[0] != 0:
                # map the IoU head's [-1, 1] output to [0, 1] and rescale the
                # classification score by iou**4 (IoU-aware rescoring)
                iou = (iou + 1) * 0.5
                scores = scores * torch.pow(iou.masked_select(mask), 4)
                # correct_direction: flip yaw by pi where the sign of the
                # regressed yaw disagrees with the direction classifier
                top_labels = (boxes3d[..., -1] > 0) ^ (dir_labels.byte() == 1)
                boxes3d[..., -1] += torch.where(top_labels, torch.tensor(np.pi).type_as(boxes3d),
                                                torch.tensor(0.0).type_as(boxes3d))
            # keep the un-projected boxes for the training branch
            pred_box3d_original_list.append(boxes3d.detach())

            # convert output to bounding box
            if len(boxes3d) != 0:
                # (N, 8, 3)
                boxes3d_corner = box_utils.boxes_to_corners_3d(boxes3d, order=self.params['order'])
                # (N, 8, 3) projected into ego space
                projected_boxes3d = box_utils.project_box3d(boxes3d_corner, transformation_matrix)
                # convert 3d bbx to 2d, (N,4)
                projected_boxes2d = box_utils.corner_to_standup_box_torch(projected_boxes3d)
                # (N, 5): standup box + score
                boxes2d_score = torch.cat((projected_boxes2d, scores.unsqueeze(1)), dim=1)

                pred_box2d_list.append(boxes2d_score)
                pred_box3d_list.append(projected_boxes3d)

        # no cav produced any box above the score threshold
        if len(pred_box2d_list) ==0 or len(pred_box3d_list) == 0:
            return None, None
        # shape: (N, 5)
        pred_box2d_list = torch.vstack(pred_box2d_list)
        # scores
        scores = pred_box2d_list[:, -1]
        # predicted 3d bbx
        pred_box3d_tensor = torch.vstack(pred_box3d_list)
        pred_box3d_original = torch.vstack(pred_box3d_original_list)

        if not self.train:
            # inference: filter degenerate boxes, then one global NMS pass
            keep_index_1 = box_utils.remove_large_pred_bbx(pred_box3d_tensor)
            keep_index_2 = box_utils.remove_bbx_abnormal_z(pred_box3d_tensor)
            keep_index = torch.logical_and(keep_index_1, keep_index_2)

            pred_box3d_tensor = pred_box3d_tensor[keep_index]
            scores = scores[keep_index]

            # nms
            keep_index = box_utils.nms_rotated(pred_box3d_tensor,
                                               scores,
                                               self.params['nms_thresh']
                                               )

            pred_box3d_tensor = pred_box3d_tensor[keep_index]

            # select cooresponding score
            scores = scores[keep_index]

            # filter out the prediction out of the range.
            mask = \
                box_utils.get_mask_for_boxes_within_range_torch(pred_box3d_tensor)
            pred_box3d_tensor = pred_box3d_tensor[mask, :, :]
            scores = scores[mask]

            assert scores.shape[0] == pred_box3d_tensor.shape[0]

            return pred_box3d_tensor, scores
        else:
            # training: split the stacked boxes back per sample using the
            # counts recorded in the cav loop, NMS each sample separately
            cur_idx = 0
            batch_pred_boxes3d = []
            batch_scores = []
            for n in batch_num_box_count:
                cur_boxes = pred_box3d_tensor[cur_idx:cur_idx+n]
                cur_scores = scores[cur_idx:cur_idx+n]
                # nms on the projected corner boxes...
                keep_index = box_utils.nms_rotated(cur_boxes,
                                                   cur_scores,
                                                   self.params['nms_thresh']
                                                   )
                # ...but the kept indices select from the un-projected boxes
                cur_boxes = pred_box3d_original[cur_idx:cur_idx+n] # [:, [0, 1, 2, 5, 4, 3, 6]] # hwl -> lwh
                batch_pred_boxes3d.append(cur_boxes[keep_index])
                batch_scores.append(cur_scores[keep_index])
                cur_idx += n
            return batch_pred_boxes3d, batch_scores
| 179 | 6,485 | 23 |