Dataset schema (one row per source file; ⌀ marks a nullable column):
- hexsha: string (length 40)
- size: int64 (4 to 1.02M)
- ext: string (8 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 4 to 209)
- max_stars_repo_name: string (length 5 to 121)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k) ⌀
- max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
- max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
- max_issues_repo_path: string (length 4 to 209)
- max_issues_repo_name: string (length 5 to 121)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k) ⌀
- max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
- max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
- max_forks_repo_path: string (length 4 to 209)
- max_forks_repo_name: string (length 5 to 121)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k) ⌀
- max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
- max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
- content: string (length 4 to 1.02M)
- avg_line_length: float64 (1.07 to 66.1k)
- max_line_length: int64 (4 to 266k)
- alphanum_fraction: float64 (0.01 to 1)
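To make the row layout concrete, the sketch below (Python, since every content cell in this dump is a Python file) shows a few fields of the first record, together with one plausible way the derived per-file statistics could be computed. The helper name and the exact formulas are assumptions, not taken from the dataset's own tooling.

row = {
    "hexsha": "b9f8ce5ff36ed5a6dad65161d489c3be32cebf45",
    "size": 2583,
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_path": "dolphinscheduler-python/pydolphinscheduler/tests/utils/test_date.py",
    "max_stars_repo_name": "InterestingLab/dolphinscheduler",
    "max_stars_repo_licenses": ["Apache-2.0"],
    "max_stars_count": 2086,
    "content": "...",  # full file text elided; see the first record below
}

def line_stats(content):
    # Plausible derivation of avg_line_length, max_line_length and alphanum_fraction.
    lengths = [len(line) for line in content.splitlines()] or [0]
    alnum = sum(ch.isalnum() for ch in content)
    return (sum(lengths) / float(len(lengths)),
            max(lengths),
            alnum / float(len(content) or 1))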
hexsha: b9f8ce5ff36ed5a6dad65161d489c3be32cebf45 | size: 2,583 | ext: py | lang: Python
repo: InterestingLab/dolphinscheduler @ 2ff76c7e7330c166fba1e514aba6ddf2dbe74f57 | path: dolphinscheduler-python/pydolphinscheduler/tests/utils/test_date.py | licenses: ["Apache-2.0"] (same for stars, issues and forks)
max_stars_count: 2,086 (2021-04-15T20:28:24.000Z to 2022-03-31T22:30:49.000Z)
max_issues_count: 3,789 (2021-04-15T16:00:32.000Z to 2022-03-31T13:38:53.000Z)
max_forks_count: 1,170 (2021-04-16T06:40:24.000Z to 2022-03-31T22:30:51.000Z)
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test utils.date module."""
from datetime import datetime
import pytest
from pydolphinscheduler.utils.date import FMT_STD, conv_from_str, conv_to_schedule
curr_date = datetime.now()
@pytest.mark.parametrize(
"src,expect",
[
(curr_date, curr_date.strftime(FMT_STD)),
(datetime(2021, 1, 1), "2021-01-01 00:00:00"),
(datetime(2021, 1, 1, 1), "2021-01-01 01:00:00"),
(datetime(2021, 1, 1, 1, 1), "2021-01-01 01:01:00"),
(datetime(2021, 1, 1, 1, 1, 1), "2021-01-01 01:01:01"),
(datetime(2021, 1, 1, 1, 1, 1, 1), "2021-01-01 01:01:01"),
],
)
def test_conv_to_schedule(src: datetime, expect: str) -> None:
"""Test function conv_to_schedule."""
assert expect == conv_to_schedule(src)
@pytest.mark.parametrize(
"src,expect",
[
("2021-01-01", datetime(2021, 1, 1)),
("2021/01/01", datetime(2021, 1, 1)),
("20210101", datetime(2021, 1, 1)),
("2021-01-01 01:01:01", datetime(2021, 1, 1, 1, 1, 1)),
("2021/01/01 01:01:01", datetime(2021, 1, 1, 1, 1, 1)),
("20210101 010101", datetime(2021, 1, 1, 1, 1, 1)),
],
)
def test_conv_from_str_success(src: str, expect: datetime) -> None:
"""Test function conv_from_str success case."""
assert expect == conv_from_str(
src
), f"Function conv_from_str convert {src} not expect to {expect}."
@pytest.mark.parametrize(
"src",
[
"2021-01-01 010101",
"2021:01:01",
"202111",
"20210101010101",
"2021:01:01 01:01:01",
],
)
def test_conv_from_str_not_impl(src: str) -> None:
"""Test function conv_from_str fail case."""
with pytest.raises(
NotImplementedError, match=".*? could not be convert to datetime for now."
):
conv_from_str(src)
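# For quick reference (illustrative, mirroring the parametrized cases above):
#   conv_to_schedule(datetime(2021, 1, 1, 1, 1, 1)) -> "2021-01-01 01:01:01"
#   conv_from_str("2021/01/01 01:01:01")            -> datetime(2021, 1, 1, 1, 1, 1)
#   conv_from_str("2021:01:01")                     -> raises NotImplementedError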
avg_line_length: 32.696203 | max_line_length: 82 | alphanum_fraction: 0.648084
hexsha: 9bca86b0ebcc59c891b64863a85a32629a81ef42 | size: 2,007 | ext: py | lang: Python
repo: mostaphaRoudsari/hera-workflows @ 2f82a7690ca42e48c3ba2f9911dbd4715aa2e093 | path: src/hera/v1/resources.py | licenses: ["MIT"] (same for stars, issues and forks)
max_stars_count, max_issues_count, max_forks_count: null (no event datetimes)
content:
"""Holds the resource specification"""
from typing import Optional, Union
from hera.v1.empty_dir_volume import EmptyDirVolume
from hera.v1.existing_volume import ExistingVolume
from hera.v1.validators import validate_storage_units
from hera.v1.volume import Volume
from pydantic import BaseModel, root_validator, validator
class Resources(BaseModel):
"""A representation of a collection of resources that are requested to be consumed by a task for execution.
Attributes
----------
min_cpu: Union[int, float] = 1
The minimum amount of CPU to request.
max_cpu: Union[int, float] = None
The maximum amount of CPU to request. If this is not specified it's automatically set to min_cpu.
min_mem: str = '4Gi'
The minimum amount of memory to request.
max_mem: Optional[str]
The maximum amount of memory to request. If this is not specified it's automatically set to min_mem.
gpus: Optional[int]
The number of GPUs to request as part of the workflow.
    volume: Optional[Volume]
        The volume to dynamically provision for the task.
    existing_volume: Optional[ExistingVolume]
        An existing volume to mount for the task.
    empty_dir_volume: Optional[EmptyDirVolume]
        An empty-dir volume to mount for the task.

    A minimal usage sketch follows this class definition.
    """
min_cpu: Union[int, float] = 1
max_cpu: Union[int, float] = None
min_mem: str = '4Gi'
max_mem: Optional[str] = None
gpus: Optional[int] = None
volume: Optional[Volume] = None
existing_volume: Optional[ExistingVolume] = None
empty_dir_volume: Optional[EmptyDirVolume] = None
@validator('min_mem', 'max_mem')
def valid_units(cls, value):
"""Validates that memory specifications have correct units"""
validate_storage_units(value)
return value
@root_validator
def valid_values(cls, values):
"""Validates that cpu values are valid"""
assert values['min_cpu'] >= 0, 'cannot specify a negative value for the min CPU field'
if 'max_cpu' in values and values.get('max_cpu'):
assert values['min_cpu'] <= values['max_cpu'], 'cannot specify a min CPU value smaller than max CPU'
return values
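# A minimal usage sketch (added for illustration; not part of hera's source; the
# argument values below are assumptions). Constructing the model runs the
# validators above: memory strings must carry a valid storage unit, and a
# min_cpu larger than max_cpu (e.g. min_cpu=4, max_cpu=2) raises a pydantic
# ValidationError via the `valid_values` root validator.
def _resources_usage_example():
    return Resources(min_cpu=1, max_cpu=2, min_mem='4Gi', max_mem='8Gi', gpus=1)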
avg_line_length: 36.490909 | max_line_length: 112 | alphanum_fraction: 0.695067
hexsha: ceb7a9075b574a2611ae6931bbddefbbe5fe0d45 | size: 2,677 | ext: py | lang: Python
max_stars: repo hwaipy/InteractionFreeNode @ 88642b68430f57b028fd0f276a5709f89279e30d | path: runtime/python/Lib/ctypes/test/test_refcounts.py | licenses: ["MIT"] | count: 207 (2018-10-01T08:53:01.000Z to 2022-03-14T12:15:54.000Z)
max_issues: repo Pydiderot/pydiderotIDE @ a42fcde3ea837ae40c957469f5d87427e8ce46d3 | path: Thonny/Lib/ctypes/test/test_refcounts.py | licenses: ["MIT"] | count: 30 (2019-01-04T10:14:56.000Z to 2020-10-12T14:00:31.000Z)
max_forks: repo Pydiderot/pydiderotIDE @ a42fcde3ea837ae40c957469f5d87427e8ce46d3 | path: Thonny/Lib/ctypes/test/test_refcounts.py | licenses: ["MIT"] | count: 76 (2020-03-16T01:47:46.000Z to 2022-03-21T16:37:07.000Z)
content:
import unittest
from test import support
import ctypes
import gc
MyCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)
OtherCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_ulonglong)
import _ctypes_test
dll = ctypes.CDLL(_ctypes_test.__file__)
class RefcountTestCase(unittest.TestCase):
@support.refcount_test
def test_1(self):
from sys import getrefcount as grc
f = dll._testfunc_callback_i_if
f.restype = ctypes.c_int
f.argtypes = [ctypes.c_int, MyCallback]
def callback(value):
#print "called back with", value
return value
self.assertEqual(grc(callback), 2)
cb = MyCallback(callback)
self.assertGreater(grc(callback), 2)
result = f(-10, cb)
self.assertEqual(result, -18)
cb = None
gc.collect()
self.assertEqual(grc(callback), 2)
@support.refcount_test
def test_refcount(self):
from sys import getrefcount as grc
def func(*args):
pass
# this is the standard refcount for func
self.assertEqual(grc(func), 2)
# the CFuncPtr instance holds at least one refcount on func:
f = OtherCallback(func)
self.assertGreater(grc(func), 2)
# and may release it again
del f
self.assertGreaterEqual(grc(func), 2)
# but now it must be gone
gc.collect()
self.assertEqual(grc(func), 2)
class X(ctypes.Structure):
_fields_ = [("a", OtherCallback)]
x = X()
x.a = OtherCallback(func)
# the CFuncPtr instance holds at least one refcount on func:
self.assertGreater(grc(func), 2)
# and may release it again
del x
self.assertGreaterEqual(grc(func), 2)
# and now it must be gone again
gc.collect()
self.assertEqual(grc(func), 2)
f = OtherCallback(func)
# the CFuncPtr instance holds at least one refcount on func:
self.assertGreater(grc(func), 2)
# create a cycle
f.cycle = f
del f
gc.collect()
self.assertEqual(grc(func), 2)
class AnotherLeak(unittest.TestCase):
def test_callback(self):
import sys
proto = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int)
def func(a, b):
return a * b * 2
f = proto(func)
a = sys.getrefcount(ctypes.c_int)
f(1, 2)
self.assertEqual(sys.getrefcount(ctypes.c_int), a)
if __name__ == '__main__':
unittest.main()
avg_line_length: 26.245098 | max_line_length: 81 | alphanum_fraction: 0.584983
hexsha: e9ba50d31ec7223f9a42539e5b17ff0bd326c85a | size: 848 | ext: py | lang: Python
repo: ethansaxenian/RosettaDecode @ 8ea1a42a5f792280b50193ad47545d14ee371fb7 | path: lang/Python/sparkline-in-unicode-1.py | licenses: ["MIT"] (same for stars, issues and forks)
max_stars_count, max_issues_count, max_forks_count: null (no event datetimes)
content:
# -*- coding: utf-8 -*-
# Unicode: 9601, 9602, 9603, 9604, 9605, 9606, 9607, 9608
bar = '▁▂▃▄▅▆▇█'
barcount = len(bar)
def sparkline(numbers):
mn, mx = min(numbers), max(numbers)
extent = mx - mn
sparkline = ''.join(bar[min([barcount - 1,
int((n - mn) / extent * barcount)])]
for n in numbers)
return mn, mx, sparkline
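# Worked example (for illustration):
#   sparkline([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0])
#   -> (1.0, 8.0, '▁▂▃▄▅▆▇█▇▆▅▄▃▂▁')
# Note: extent is zero when all inputs are equal, which would raise ZeroDivisionError here.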
if __name__ == '__main__':
import re
for line in ("0 0 1 1; 0 1 19 20; 0 999 4000 4999 7000 7999;"
"1 2 3 4 5 6 7 8 7 6 5 4 3 2 1;"
"1.5, 0.5 3.5, 2.5 5.5, 4.5 7.5, 6.5 ").split(';'):
print(("\nNumbers:", line))
numbers = [float(n) for n in re.split(r'[\s,]+', line.strip())]
mn, mx, sp = sparkline(numbers)
        print(' min: %5f; max: %5f' % (mn, mx))
        print(" " + sp)
avg_line_length: 32.615385 | max_line_length: 71 | alphanum_fraction: 0.474057
hexsha: a5d2f41fb21c6ca47d7c7714a4bb00db522ef23b | size: 25,340 | ext: py | lang: Python
repo: TugberkArkose/MLScheduler @ e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | path: scripts/modelTrainer.py | licenses: ["Unlicense"] (same for stars, issues and forks)
max_stars_count, max_issues_count, max_forks_count: null (no event datetimes)
content:
import sys
import os
import json
import itertools
import sim
import operator
from collections import defaultdict
BIG = [0]
SMALL = [1, 2, 3]
#PATH = "python /scratch/nas/1/dn/sniper-6.0/benchmarks/SimResults/myNumpy0102.py "
PATH = "python /scratch/nas/3/dn/sniper-6.0/scripts/predictor_blackbox.py "
MODELNAME = "/scratch/nas/3/dn/sniper-6.0/scripts/MICRO_sys_predictor_full_trained.p"
accumulated_stats = []
#STATSORDER = ['brhits', 'brmisses', 'dramreqs', 'dramreads', 'dramwrites', 'dtlbaccess', 'dtlbmisses', 'itlbaccess', 'itlbmisses', 'stlbaccess',
# 'stlbmisses', 'dl1loads', 'dl1misses', 'dl1stores', 'il1loads', 'il1misses', 'il1stores','l2loads', 'l2misses', 'l2stores',
# 'l3loads', 'l3misses', 'l3stores', 'uopbr', 'uopfpaddsub', 'uopfpmuldiv', 'uopgeneric', 'uopld', 'uopst', 'uoptotal']
params_to_use_per_thread = [
'uopBR_Norm', 'uopFPtotal_Norm', 'uopGeneric_Norm', 'uopLD_Norm',
'DL1miss_Norm', 'L2miss_Norm', 'L3miss_Norm', 'IL1ld_div_DL1ld_Norm',
'L2miss_div_DL1miss_Norm', 'L3miss_div_L2miss_Norm',
'L3miss_div_DL1miss_Norm'
] # 11 in total
STATSORDER = [['/', 'uopbr', 'uoptotal'], [
'/', ['+', 'uopfpaddsub', 'uopfpmuldiv'], 'uoptotal'
], ['/', 'uopgeneric',
'uoptotal'], ['/', 'uopld', 'uoptotal'], ['/', 'uopst', 'uoptotal'], [
'/', 'dl1misses', ['+', 'dl1loads', 'dl1stores']
], ['/', 'l2misses', ['+', 'l2loads', 'l2stores']], [
'/', 'l3misses', ['+', 'l3loads', 'l3stores']
], ['/', 'il1loads', 'dl1loads'], ['/', 'l2misses', 'dl1misses'],
['/', 'l3misses', 'l2misses'], ['/', 'l3misses', 'dl1misses']]
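# Illustrative helper (an assumption, not part of the original script): each
# STATSORDER entry is a small prefix expression over counter names, e.g.
# ['/', 'dl1misses', ['+', 'dl1loads', 'dl1stores']] means
# dl1misses / (dl1loads + dl1stores). Over a plain dict of counter values it
# could be evaluated recursively like this (getStats below does the equivalent,
# but reads each counter from stats[name][core].delta instead of a dict):
def _eval_stat_expr(expr, counters):
    if not isinstance(expr, list):
        return counters.get(expr, 0)
    lhs = _eval_stat_expr(expr[1], counters)
    rhs = _eval_stat_expr(expr[2], counters)
    if expr[0] == '+':
        return lhs + rhs
    return lhs / rhs if rhs != 0 else 0  # '/' with the same zero-guard as getStats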
def getScoreMetricTime(thread_id):
return long(sim.stats.get('thread', thread_id, 'nonidle_elapsed_time'))
def getScoreMetricInstructions(thread_id):
return long(sim.stats.get('thread', thread_id, 'instruction_count'))
class Thread:
global BIG
global SMALL
global PATH
global STATSORDER
def __init__(self, thread_id):
self.thread_id = thread_id
self.core = None
self.runnable = False
self.unscheduled = False
self.BigIpc = 0.1
self.SmallIpc = 0.1
self.getScoreMetric = lambda: getScoreMetricInstructions(thread_id)
self.score = 0 # Accumulated score
self.prevIPC = 0.1
self.prevCore = None
self.train_cycle = 1
self.ipc = 0
self.cycles = 0
self.mapping = []
self.core_swap = 0#did thread swap cores between quantums
self.thread_stats = []
self.hetero_score = 0 # Accumulated fairness score
self.metric_last = 0 # State at start of last interval
sim.thread.set_thread_affinity(self.thread_id, ())
def updateScore(self, stats):
self.cycles = stats['time'][self.core].delta * sim.dvfs.get_frequency(
self.core) / 1e9 # convert fs to cycles
instrs = stats['coreinstrs'][self.core].delta
self.ipc = instrs / (self.cycles or 1)
self.thread_stats = self.getStats(stats)
def getStats(self, stats):
result = []
value1 = 0
value2 = 0
result.append(self.core_swap)
for key in STATSORDER:
if type(key) == list:
if type(key[1]) == list:
k_value1 = (stats[key[1][1]])[self.core].delta
k_value2 = (stats[key[1][2]])[self.core].delta
if key[1][0] == '/':
if k_value2 != 0:
value1 = (k_value1 / k_value2)
else:
value1 = 0
elif key[1][0] == '+':
value1 = (k_value1 + k_value2)
else:
value1 = (stats[key[1]])[self.core].delta
if type(key[2]) == list:
k_value1 = (stats[key[2][1]])[self.core].delta
k_value2 = (stats[key[2][2]])[self.core].delta
if key[2][0] == '/':
if k_value2 != 0:
value2 = (k_value1 / k_value2)
else:
                            value2 = 0
elif key[2][0] == '+':
value2 = (k_value1 + k_value2)
else:
value2 = (stats[key[2]])[self.core].delta
if key[0] == '/':
if value2 != 0:
result.append(value1 / value2)
else:
result.append(0)
elif key[0] == '+':
result.append(value1 + value2)
else:
result.append((stats[key])[self.core].delta)
return result
def normalizeStats(self, stats):
normalized_stats = []
for index, value in enumerate(stats):
min_value = self.getMin(self.accumulated_non_normalized_stats,
index)
max_value = self.getMax(self.accumulated_non_normalized_stats,
index)
normalized_stats.append(
(value - min_value) / (max_value - min_value))
return normalized_stats
def getMax(self, accumulated_non_normalized_stats, index):
max_value = -5000
for stat_list in accumulated_non_normalized_stats:
if stat_list[index] > max_value:
max_value = stat_list[index]
return max_value
def getMin(self, accumulated_non_normalized_stats, index):
min_value = 5000
for stat_list in accumulated_non_normalized_stats:
if stat_list[index] < min_value:
min_value = stat_list[index]
return min_value
def updateHeteroScore(self):
metric_now = self.getScoreMetric()
self.hetero_score += metric_now - self.metric_last
self.metric_last = metric_now
def setScore(self, score):
self.score = score
def setHeteroScore(self, hetero_score):
self.hetero_score = hetero_score
self.metric_last = self.getScoreMetric()
def setCore(self, core_id, time=-1):
self.prevCore = self.core
self.core = core_id
if core_id is None:
self.updateHeteroScore()
self.last_scheduled_out = time
sim.thread.set_thread_affinity(self.thread_id, ())
else:
self.last_scheduled_in = time
sim.thread.set_thread_affinity(self.thread_id, [
c == core_id for c in range(sim.config.ncores)
])
def update_core_id(self, core_id):
self.prevCore = self.core
self.core = core_id
if self.prevCore:
if self.prevCore != self.core:
self.core_swap = 1
else:
self.core_swap = 0
def send_stats(self, stats):
statlist = []
if self.core in BIG:
statlist.append(self.BigIpc)
for key in STATSORDER:
statlist.append((stats[key])[self.core].delta)
jlist = json.dumps(statlist, separators=(',', ':'))
proc = os.popen(PATH + str(0) + " " + jlist).read()
#result = json.loads(proc)
#code above does not work check why
result = proc
return result
if self.core in SMALL:
statlist.append(self.SmallIpc)
for key in STATSORDER:
statlist.append((stats[key])[self.core].delta)
jlist = json.dumps(statlist, separators=(',', ':'))
proc = os.popen(PATH + str(1) + " " + jlist).read()
#result = json.loads(proc)
#code above does not work check why
result = proc
fresult = float(result)
return fresult
def __repr__(self):
r = str(self.thread_id) + " "
if self.core in BIG:
r += " " + "{:.4f}".format(self.ipc) + " "
r += " *" + "{:.4f}".format(self.ipc) + " "
elif self.core in SMALL:
r += " *" + "{:.4f}".format(self.ipc) + " "
r += " " + "{:.4f}".format(self.ipc) + " "
else:
r += " ?" + "{:.4f}".format(self.ipc) + " "
r += " ?" + "{:.4f}".format(self.ipc) + " "
r += "{:.4f}".format(self.score) + " "
r += "R " if self.runnable else "W "
if self.core is not None:
r += str(self.core)
else:
r += "N"
return r
class SchedulerLocality:
predicted_ipc = 0
predicted_mapping = []
prev_predicted_ipc = 0
prediction_gap = []
train_cycle = 0
train_data = []
system_ipcs = []
hetero_timer = 0
def setup(self, args):
print "setup"
self.icount_last = [0 for core in range(sim.config.ncores)]
self.last_reschedule = 0
self.sd = sim.util.StatsDelta()
self.stats = {
'time': [
self.getStatsGetter('performance_model', core, 'elapsed_time')
for core in range(sim.config.ncores)
],
'ffwd_time': [
self.getStatsGetter('fastforward_performance_model', core,
'fastforwarded_time')
for core in range(sim.config.ncores)
],
'instrs': [
self.getStatsGetter('performance_model', core,
'instruction_count')
for core in range(sim.config.ncores)
],
'coreinstrs': [
self.getStatsGetter('core', core, 'instructions')
for core in range(sim.config.ncores)
],
'brhits': [
self.getStatsGetter('branch_predictor', core, 'num-correct')
for core in range(sim.config.ncores)
],
'brmisses': [
self.getStatsGetter('branch_predictor', core, 'num-incorrect')
for core in range(sim.config.ncores)
],
'dramreqs': [
self.getStatsGetter('dram-queue', core, 'num-requests')
for core in range(sim.config.ncores)
],
'dramreads': [
self.getStatsGetter('dram', core, 'reads')
for core in range(sim.config.ncores)
],
'dramwrites': [
self.getStatsGetter('dram', core, 'writes')
for core in range(sim.config.ncores)
],
'dtlbaccess': [
self.getStatsGetter('dtlb', core, 'access')
for core in range(sim.config.ncores)
],
'dtlbmisses': [
self.getStatsGetter('dtlb', core, 'miss')
for core in range(sim.config.ncores)
],
'itlbaccess': [
self.getStatsGetter('itlb', core, 'access')
for core in range(sim.config.ncores)
],
'itlbmisses': [
self.getStatsGetter('itlb', core, 'miss')
for core in range(sim.config.ncores)
],
'stlbaccess': [
self.getStatsGetter('stlb', core, 'access')
for core in range(sim.config.ncores)
],
'stlbmisses': [
self.getStatsGetter('stlb', core, 'miss')
for core in range(sim.config.ncores)
],
'dl1loads': [
self.getStatsGetter('L1-D', core, 'loads')
for core in range(sim.config.ncores)
],
'dl1misses': [
self.getStatsGetter('L1-D', core, 'load-misses')
for core in range(sim.config.ncores)
],
'dl1stores': [
self.getStatsGetter('L1-D', core, 'stores')
for core in range(sim.config.ncores)
],
'il1loads': [
self.getStatsGetter('L1-I', core, 'loads')
for core in range(sim.config.ncores)
],
'il1misses': [
self.getStatsGetter('L1-I', core, 'load-misses')
for core in range(sim.config.ncores)
],
'il1stores': [
self.getStatsGetter('L1-I', core, 'stores')
for core in range(sim.config.ncores)
],
'l2loads': [
self.getStatsGetter('L2', core, 'loads')
for core in range(sim.config.ncores)
],
'l2misses': [
self.getStatsGetter('L2', core, 'load-misses')
for core in range(sim.config.ncores)
],
'l2stores': [
self.getStatsGetter('L2', core, 'stores')
for core in range(sim.config.ncores)
],
'l3loads': [
self.getStatsGetter('L3', core, 'loads')
for core in range(sim.config.ncores)
],
'l3misses': [
self.getStatsGetter('L3', core, 'load-misses')
for core in range(sim.config.ncores)
],
'l3stores': [
self.getStatsGetter('L3', core, 'stores')
for core in range(sim.config.ncores)
],
'uopbr': [
self.getStatsGetter('interval_timer', core, 'uop_branch')
for core in range(sim.config.ncores)
],
'uopfpaddsub': [
self.getStatsGetter('interval_timer', core, 'uop_fp_addsub')
for core in range(sim.config.ncores)
],
'uopfpmuldiv': [
self.getStatsGetter('interval_timer', core, 'uop_fp_muldiv')
for core in range(sim.config.ncores)
],
'uopgeneric': [
self.getStatsGetter('interval_timer', core, 'uop_generic')
for core in range(sim.config.ncores)
],
'uopld': [
self.getStatsGetter('interval_timer', core, 'uop_load')
for core in range(sim.config.ncores)
],
'uopst': [
self.getStatsGetter('interval_timer', core, 'uop_store')
for core in range(sim.config.ncores)
],
'uoptotal': [
self.getStatsGetter('interval_timer', core, 'uops_total')
for core in range(sim.config.ncores)
],
}
args = dict(enumerate((args or '').split(':')))
interval_ns = long(args.get(0, None) or 10000000)
scheduler_type = args.get(1, 'equal_time')
core_mask = args.get(2, '')
if scheduler_type == 'equal_time':
self.getScoreMetric = getScoreMetricTime
elif scheduler_type == 'equal_instructions':
self.getScoreMetric = getScoreMetricInstructions
else:
raise ValueError('Invalid scheduler type %s' % scheduler_type)
if core_mask:
core_mask = map(int,
core_mask.split(',')) + [0] * sim.config.ncores
self.cores = [
core for core in range(sim.config.ncores) if core_mask[core]
]
else:
self.cores = range(sim.config.ncores)
sim.util.Every(
1000000 * sim.util.Time.NS,
self.periodic,
statsdelta=self.sd,
roi_only=True)
self.threads = {}
self.last_core = 0
def hook_thread_start(self, thread_id, time):
self.threads[thread_id] = Thread(thread_id)
self.threads[thread_id].runnable = True
# Initial assignment: one thread per core until cores are exhausted
if self.last_core < len(self.cores):
self.threads[thread_id].setCore(self.cores[self.last_core],
sim.stats.time())
self.last_core += 1
else:
self.threads[thread_id].setCore(None, sim.stats.time())
def hook_thread_exit(self, thread_id, time):
self.hook_thread_stall(thread_id, 'exit', time)
def hook_thread_stall(self, thread_id, reason, time):
if reason == 'unscheduled':
# Ignore calls due to the thread being scheduled out
self.threads[thread_id].unscheduled = True
else:
core = self.threads[thread_id].core
self.threads[thread_id].setCore(None, time)
self.threads[thread_id].runnable = False
# Schedule a new thread (runnable, but not running) on this free core
threads = [
thread for thread in self.threads.values()
if thread.runnable and thread.core is None
]
if threads:
# Order by score
threads.sort(key=lambda thread: thread.score)
threads[0].setCore(core, time)
def hook_thread_resume(self, thread_id, woken_by, time):
if self.threads[thread_id].unscheduled:
# Ignore calls due to the thread being scheduled back in
self.threads[thread_id].unscheduled = False
else:
self.threads[thread_id].setHeteroScore(
max([thread.hetero_score for thread in self.threads.values()]))
self.threads[thread_id].runnable = True
#If there is a free core, move us there now
used_cores = set([
thread.core for thread in self.threads.values()
if thread.core is not None
])
free_cores = set(self.cores) - used_cores
if len(free_cores):
self.threads[thread_id].setCore(list(free_cores)[0], time)
def hook_thread_migrate(self, threadid, coreid, time):
thread = self.findThread(self.threads, threadid)
thread.update_core_id(coreid)
def getSystemIPCForPreviousQuantum(self, threads):
system_ipc = 0
for thread in threads:
system_ipc += thread.ipc
return system_ipc
def updateTrainData(self, threads_to_train):
temp = []
for thread in threads_to_train:
if thread.thread_stats:
temp.extend(thread.thread_stats)
else:
temp.extend([0] * len(STATSORDER))
ipc = self.getSystemIPCForPreviousQuantum(threads_to_train)
self.system_ipcs.append(ipc)
self.train_data.append(temp)
# def predict(self, a, b, c, d):
# a = json.dumps(a, separators=(',', ':'))
# b = json.dumps(b, separators=(',', ':'))
# c = json.dumps(c, separators=(',', ':'))
# d = json.dumps(d, separators=(',', ':'))
# proc = os.popen(PATH + str(1) + " " + MODELNAME + " " + a + " " + b +
# " " + c + " " + d).read()
# #result = json.loads(proc)
# #code above does not work check why
# result = proc
# #print(result)
# #do sys call
# #syscall(train_data)
# return result
def findThread(self, threads, thread_id):
for thread in self.threads.values():
if thread.thread_id == thread_id:
return thread
def periodic(self, time, time_delta):
quantum = int(sim.stats.time() / 1e12)
order = ""
# Update mapper thread scores
[
thread.updateScore(self.stats) for thread in self.threads.values()
if thread.core is not None
]
threads_to_train = [
thread for thread in self.threads.values()
if thread.core is not None
]
threads_to_train.sort(key=lambda thread: thread.core)
combination_size = len(BIG) + len(SMALL)
train_intervals = [200,400,600]
if len(threads_to_train) >= 4:
self.updateTrainData(threads_to_train)
for train_interval in train_intervals:
if quantum % train_interval == 0:
if len(threads_to_train) >= 4:
model_name = "/scratch/nas/3/dn/sniper-6.0/scripts/modelWithInterval"+str(train_interval)+".p"
self.train(self.train_data, quantum, train_interval, model_name)
def train(self, train_data, quantum, train_interval, model_name=MODELNAME):
combination_size = len(BIG) + len(SMALL)
start_index = (quantum * combination_size) - (train_interval * combination_size)
if start_index < 0:
return
jlist = json.dumps(train_data[start_index:], separators=(',', ':'))
statList = json.dumps(self.system_ipcs[start_index:], separators=(',', ':'))
proc = os.popen(PATH + str(0) + " " + model_name + " " + jlist + " " +
statList + " ").read()
#result = json.loads(proc)
#code above does not work check why
result = proc
#do sys call
#syscall(train_data)
def getStatsGetter(self, component, core, metric):
# Some components don't exist (i.e. DRAM reads on cores that don't have a DRAM controller),
# return a special object that always returns 0 in these cases
try:
return self.sd.getter(component, core, metric)
print ""
except:
class Zero():
def __init__(self):
self.delta = 0
def update(self):
pass
return Zero()
def testPrint(self):
print '----------- Quantum ', int(
sim.stats.time() / 1e12), '------------'
total_ipc = 0
for thread in self.threads.values():
if thread.core in BIG:
total_ipc += thread.BigIpc
elif thread.core in SMALL:
total_ipc += thread.SmallIpc
print thread
# print 'idle:',
# for core in range(sim.config.ncores):
# print '%2.0f%%' % (
# 100 * sim.stats.get('performance_model', core, 'idle_elapsed_time') / float(sim.stats.time())),
# print '%7d' % sim.stats.get('performance_model', core, 'idle_elapsed_time'),
# print '\nthreads:',
# for thread in range(sim.thread.get_nthreads()):
# print '%7dkins' % (sim.stats.get('thread', thread, 'instruction_count'))
print '-----------------------'
def get_quantum_squareError(self, pred, y):
#pred is the predicted system IPC value and y is the observed IPC value after quantum
e = (pred - y)**2
return e
def get_quantum_percentError(self, pred, y):
#pred is the predicted system IPC value and y is the observed IPC value after quantum
e = abs(pred - y) / y
return e
def printInfo(self):
threads = [ thread for thread in self.threads.values() if thread.core is not None ]
print '----------- Quantum ', int(
sim.stats.time() / 1e12), '------------'
total_ipc = 0
for thread in threads:
total_ipc += thread.ipc
print "System IPC : " + str(total_ipc)
mapping = "[ "
core_mapping = defaultdict(list)
for thread in self.threads.values():
core_mapping[thread.core] = thread.thread_id
for i in range(0, (len(BIG) + len(SMALL))):
if core_mapping[i] or core_mapping[i] == 0:
mapping += str(core_mapping[i]) + " "
else:
mapping += "- "
mapping += "]"
if (int(sim.stats.time() / 1e12) > 1):
print "Misprediction : " + str(total_ipc - self.prev_predicted_ipc)
print "Predicted Ipc : " + str(self.predicted_ipc)
print "System Map " + mapping
print "Quantum Square Error : " + str(
self.get_quantum_squareError(self.prev_predicted_ipc, total_ipc))
print "Quantum Percent Error : " + str(
self.get_quantum_percentError(self.prev_predicted_ipc, total_ipc))
print "TId " + "B " + "S " + "Sc " + "Status " + "Core"
for thread in self.threads.values():
print thread
# print "*System IPC : " + str(self.predicted_ipc)
#
# mapping = "[ "
# core_mapping_predicted = defaultdict(list)
# for idx, thread in enumerate(self.predicted_mapping):
# core_mapping_predicted[idx] = thread.thread_id
# for i in range(0, (len(BIG) + len(SMALL))):
# if core_mapping_predicted[i] or core_mapping_predicted[i] == 0:
# mapping += str(core_mapping_predicted[i]) +" "
# else:
# mapping += "- "
# mapping +="]"
# print "*System Map " + mapping
# if(int(sim.stats.time() / 1e12) > 1):
# print "Avarage system misprediction : " + str(sum(self.prediction_gap) / len(self.prediction_gap))
# for thread in self.threads.values():
# if (thread.core in BIG and thread.prevCore in SMALL):
# print "thread id : ", str(thread.thread_id), " misprediction s2b : ", str(thread.BigIpc - thread.prevIPC)
# elif (thread.core in SMALL and thread.prevCore in BIG):
# print "thread id : ", str(thread.thread_id), " misprediction b2s : ", str(thread.SmallIpc - thread.prevIPC)
sim.util.register(SchedulerLocality())
avg_line_length: 38.393939 | max_line_length: 145 | alphanum_fraction: 0.530268
hexsha: 9c2b8c98a686b239324f9ae6b078c448656cee3a | size: 1,133 | ext: py | lang: Python
repo: Rumiachang/keras-examples @ 467ac2e693930980bb21315fb33b298fff852a31 | path: vgg16/dogs_vs_cats/setup.py | licenses: ["MIT"] (same for stars, issues and forks)
max_stars_count, max_issues_count, max_forks_count: null (no event datetimes)
content:
import os
import glob
"""
train.zipを解凍したtrainから
https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
にあるように訓練データを振り分ける
"""
imgs_path = "./image_dir/*.png"
source_dir = "./train"
train_dir = "./data/train"
valid_dir = "./data/validation"
os.makedirs("%s/dogs" % train_dir)
os.makedirs("%s/cats" % train_dir)
os.makedirs("%s/dogs" % valid_dir)
os.makedirs("%s/cats" % valid_dir)
"""
list_imgs_path = glob.glob(imgs_path)
for i, imgs_file in enumerate(list_imgs_path):
os.rename(imgs_file ,"./image_dir/" + str(i) + ".png" )
"""
# Move the first 1000 images to train_dir
for i in range(1000):
os.rename("%s/dog.%d.jpg" % (source_dir, i + 1),
"%s/dogs/dog%04d.jpg" % (train_dir, i + 1))
os.rename("%s/cat.%d.jpg" % (source_dir, i + 1),
"%s/cats/cat%04d.jpg" % (train_dir, i + 1))
# Move the next 400 images to valid_dir
for i in range(400):
os.rename("%s/dog.%d.jpg" % (source_dir, 1000 + i + 1),
"%s/dogs/dog%04d.jpg" % (valid_dir, i + 1))
os.rename("%s/cat.%d.jpg" % (source_dir, 1000 + i + 1),
"%s/cats/cat%04d.jpg" % (valid_dir, i + 1))
avg_line_length: 28.325 | max_line_length: 95 | alphanum_fraction: 0.622242
hexsha: 7f30a0a5c85021642e41174e4d2682295901eedd | size: 1,752 | ext: py | lang: Python
repo: iamlion12/DjangoBlog @ e5db1a3f02e23b740912fa5adc897bd3916f311b | path: fmit/blog/migrations/0001_initial.py | licenses: ["MIT"] (same for stars, issues and forks)
max_stars_count, max_issues_count, max_forks_count: null (no event datetimes)
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-19 09:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='comment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='blog.Post'),
),
]
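# For reference (illustrative reconstruction, not part of the migration file):
# the models this initial migration corresponds to look roughly like
#
#     class Post(models.Model):
#         author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#         title = models.CharField(max_length=200)
#         text = models.TextField()
#         created_date = models.DateTimeField(default=timezone.now)
#         published_date = models.DateTimeField(blank=True, null=True)
#
#     class Comment(models.Model):
#         post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comment')
#         author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#         text = models.TextField()
#         created_date = models.DateTimeField(default=timezone.now)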
avg_line_length: 38.086957 | max_line_length: 121 | alphanum_fraction: 0.61758
hexsha: 8aa776d9f8f41df833c9387d2d4502f55f5bd216 | size: 116,023 | ext: py | lang: Python
repo: matan-xmcyber/content @ 7f02301c140b35956af3cd20cb8dfc64f34afb3e | path: Packs/RecordedFuture/Integrations/RecordedFuture/test_data/mock_samples.py | licenses: ["MIT"] (same for stars, issues and forks)
max_stars_count: 1 (2021-08-07T00:21:58.000Z to 2021-08-07T00:21:58.000Z)
max_issues_count: 3 (2019-12-13T13:27:20.000Z to 2020-01-01T14:27:45.000Z)
max_forks_count: 2 (2020-12-10T12:02:45.000Z to 2020-12-15T09:20:01.000Z)
content:
IP_LOOKUP = {
"data": {
"analystNotes": [
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 00",
"topic": {
"name": "Type 00"
},
"text": "Text 00"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 01",
"topic": {
"name": "Type 01"
},
"text": "Text 01"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 02",
"topic": {
"name": "Type 02"
},
"text": "Text 02"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 03",
"topic": {
"name": "Type 03"
},
"text": "Text 03"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 04",
"topic": {
"name": "Type 04"
},
"text": "Text 04"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 05",
"topic": {
"name": "Type 05"
},
"text": "Text 05"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 06",
"topic": {
"name": "Type 06"
},
"text": "Text 06"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 07",
"topic": {
"name": "Type 07"
},
"text": "Text 07"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 08",
"topic": {
"name": "Type 08"
},
"text": "Text 08"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 09",
"topic": {
"name": "Type 09"
},
"text": "Text 09"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 10",
"topic": {
"name": "Type 10"
},
"text": "Text 10"
}
},
{
"attributes": {
"published": "2019-01-01T00:00:00.000Z",
"title": "Title 11",
"topic": {
"name": "Type 11"
},
"text": "Text 11"
}
}
],
"timestamps": {
"firstSeen": "2019-01-01T00:00:00.000Z",
"lastSeen": "2019-12-31T23:59:59.999Z"
},
"threatLists": [
{
"id": "report:1",
"name": "Threat List 1",
"type": "EntityList",
"description": "Threat List Description 1"
},
{
"id": "report:2",
"name": "Threat List 2",
"type": "EntityList",
"description": "Threat List Description 2"
},
{
"id": "report:3",
"name": "Threat List 3",
"type": "EntityList",
"description": "Threat List Description 3"
},
{
"id": "report:4",
"name": "Threat List 4",
"type": "EntityList",
"description": "Threat List Description 4"
},
{
"id": "report:5",
"name": "Threat List 5",
"type": "EntityList",
"description": "Threat List Description 5"
},
{
"id": "report:6",
"name": "Threat List 6",
"type": "EntityList",
"description": "Threat List Description 6"
},
{
"id": "report:7",
"name": "Threat List 7",
"type": "EntityList",
"description": "Threat List Description 7"
},
{
"id": "report:8",
"name": "Threat List 8",
"type": "EntityList",
"description": "Threat List Description 8"
},
{
"id": "report:9",
"name": "Threat List 9",
"type": "EntityList",
"description": "Threat List Description 9"
},
{
"id": "report:10",
"name": "Threat List 10",
"type": "EntityList",
"description": "Threat List Description 10"
},
{
"id": "report:11",
"name": "Threat List 11",
"type": "EntityList",
"description": "Threat List Description 11"
},
{
"id": "report:12",
"name": "Threat List 12",
"type": "EntityList",
"description": "Threat List Description 12"
}
],
"risk": {
"criticalityLabel": "Malicious",
"score": 75,
"evidenceDetails": [
{
"migitationString": "MigitationString 9",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Very Malicious",
"evidenceString": "EvidenceString 9",
"rule": "RuleString 9",
"criticality": 1
},
{
"migitationString": "MigitationString 4",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Unusual",
"evidenceString": "EvidenceString 4",
"rule": "RuleString 4",
"criticality": 4
},
{
"migitationString": "MigitationString 7",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Unusual",
"evidenceString": "EvidenceString 7",
"rule": "RuleString 7",
"criticality": 3
},
{
"migitationString": "MigitationString 5",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Malicious",
"evidenceString": "EvidenceString 5",
"rule": "RuleString 5",
"criticality": 1
},
{
"migitationString": "MigitationString 1",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Unusual",
"evidenceString": "EvidenceString 1",
"rule": "RuleString 1",
"criticality": 1
},
{
"migitationString": "MigitationString 12",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Very Malicious",
"evidenceString": "EvidenceString 12",
"rule": "RuleString 12",
"criticality": 4
},
{
"migitationString": "MigitationString 6",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Very Malicious",
"evidenceString": "EvidenceString 6",
"rule": "RuleString 6",
"criticality": 2
},
{
"migitationString": "MigitationString 3",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Very Malicious",
"evidenceString": "EvidenceString 3",
"rule": "RuleString 3",
"criticality": 3
},
{
"migitationString": "MigitationString 10",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Unusual",
"evidenceString": "EvidenceString 10",
"rule": "RuleString 10",
"criticality": 2
},
{
"migitationString": "MigitationString 11",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Malicious",
"evidenceString": "EvidenceString 11",
"rule": "RuleString 11",
"criticality": 3
},
{
"migitationString": "MigitationString 8",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Malicious",
"evidenceString": "EvidenceString 8",
"rule": "RuleString 8",
"criticality": 4
},
{
"migitationString": "MigitationString 2",
"timestamp": "2019-01-01T00:00:00.000Z",
"criticalityLabel": "Malicious",
"evidenceString": "EvidenceString 2",
"rule": "RuleString 2",
"criticality": 2
}
],
"riskString": "12/42",
"rules": 12,
"criticality": 3,
"riskSummary": "12 of 42 Risk Rules currently observed."
},
"intelCard": "https://app.recordedfuture.com/live/sc/entity/DUMMY",
"sightings": [
{
"source": "recentSocialMedia source",
"url": "https://www.recentSocialMedia.com",
"published": "2019-01-01T00:00:00.000Z",
"fragment": "Fragment Sighting recentSocialMedia",
"title": "Fragment Title recentSocialMedia",
"type": "recentSocialMedia"
},
{
"source": "recentInfoSec source",
"url": "https://www.recentInfoSec.com",
"published": "2019-01-01T00:00:00.000Z",
"fragment": "Fragment Sighting recentInfoSec",
"title": "Fragment Title recentInfoSec",
"type": "recentInfoSec"
},
{
"source": "first source",
"url": "https://www.first.com",
"published": "2019-01-01T00:00:00.000Z",
"fragment": "Fragment Sighting first",
"title": "Fragment Title first",
"type": "first"
},
{
"source": "mostRecent source",
"url": "https://www.mostRecent.com",
"published": "2019-01-01T00:00:00.000Z",
"fragment": "Fragment Sighting mostRecent",
"title": "Fragment Title mostRecent",
"type": "mostRecent"
}
],
"entity": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"counts": [
{
"count": 2,
"date": "2019-10-30"
},
{
"count": 9,
"date": "2019-10-23"
},
{
"count": 3,
"date": "2019-10-29"
},
{
"count": 13,
"date": "2019-10-19"
},
{
"count": 18,
"date": "2019-10-14"
},
{
"count": 19,
"date": "2019-10-13"
},
{
"count": 1,
"date": "2019-10-31"
},
{
"count": 6,
"date": "2019-10-26"
},
{
"count": 16,
"date": "2019-10-16"
},
{
"count": 11,
"date": "2019-10-21"
},
{
"count": 10,
"date": "2019-10-22"
},
{
"count": 14,
"date": "2019-10-18"
},
{
"count": 17,
"date": "2019-10-15"
},
{
"count": 5,
"date": "2019-10-27"
},
{
"count": 0,
"date": "2019-11-01"
},
{
"count": 8,
"date": "2019-10-24"
},
{
"count": 4,
"date": "2019-10-28"
},
{
"count": 12,
"date": "2019-10-20"
},
{
"count": 7,
"date": "2019-10-25"
},
{
"count": 15,
"date": "2019-10-17"
}
],
"relatedEntities": [
{
"type": "RelatedCompany",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Company 00",
"type": "Company"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Company 01",
"type": "Company"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Company 02",
"type": "Company"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Company 03",
"type": "Company"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Company 04",
"type": "Company"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Company 05",
"type": "Company"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Company 06",
"type": "Company"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Company 07",
"type": "Company"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Company 08",
"type": "Company"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Company 09",
"type": "Company"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Company 10",
"type": "Company"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Company 11",
"type": "Company"
}
}
]
},
{
"type": "RelatedAttacker",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Attacker 00",
"type": "Attacker"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Attacker 01",
"type": "Attacker"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Attacker 02",
"type": "Attacker"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Attacker 03",
"type": "Attacker"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Attacker 04",
"type": "Attacker"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Attacker 05",
"type": "Attacker"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Attacker 06",
"type": "Attacker"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Attacker 07",
"type": "Attacker"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Attacker 08",
"type": "Attacker"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Attacker 09",
"type": "Attacker"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Attacker 10",
"type": "Attacker"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Attacker 11",
"type": "Attacker"
}
}
]
},
{
"type": "RelatedTarget",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Target 00",
"type": "Target"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Target 01",
"type": "Target"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Target 02",
"type": "Target"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Target 03",
"type": "Target"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Target 04",
"type": "Target"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Target 05",
"type": "Target"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Target 06",
"type": "Target"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Target 07",
"type": "Target"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Target 08",
"type": "Target"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Target 09",
"type": "Target"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Target 10",
"type": "Target"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Target 11",
"type": "Target"
}
}
]
},
{
"type": "RelatedThreatActor",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "ThreatActor 00",
"type": "ThreatActor"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "ThreatActor 01",
"type": "ThreatActor"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "ThreatActor 02",
"type": "ThreatActor"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "ThreatActor 03",
"type": "ThreatActor"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "ThreatActor 04",
"type": "ThreatActor"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "ThreatActor 05",
"type": "ThreatActor"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "ThreatActor 06",
"type": "ThreatActor"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "ThreatActor 07",
"type": "ThreatActor"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "ThreatActor 08",
"type": "ThreatActor"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "ThreatActor 09",
"type": "ThreatActor"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "ThreatActor 10",
"type": "ThreatActor"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "ThreatActor 11",
"type": "ThreatActor"
}
}
]
},
{
"type": "RelatedMalware",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Malware 00",
"type": "Malware"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Malware 01",
"type": "Malware"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Malware 02",
"type": "Malware"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Malware 03",
"type": "Malware"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Malware 04",
"type": "Malware"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Malware 05",
"type": "Malware"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Malware 06",
"type": "Malware"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Malware 07",
"type": "Malware"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Malware 08",
"type": "Malware"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Malware 09",
"type": "Malware"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Malware 10",
"type": "Malware"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Malware 11",
"type": "Malware"
}
}
]
},
{
"type": "RelatedCyberVulnerability",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 00",
"type": "CyberVulnerability"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 01",
"type": "CyberVulnerability"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 02",
"type": "CyberVulnerability"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 03",
"type": "CyberVulnerability"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 04",
"type": "CyberVulnerability"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 05",
"type": "CyberVulnerability"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 06",
"type": "CyberVulnerability"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 07",
"type": "CyberVulnerability"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 08",
"type": "CyberVulnerability"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 09",
"type": "CyberVulnerability"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 10",
"type": "CyberVulnerability"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "CyberVulnerability 11",
"type": "CyberVulnerability"
}
}
]
},
{
"type": "RelatedIpAddress",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "IpAddress 00",
"type": "IpAddress"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "IpAddress 01",
"type": "IpAddress"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "IpAddress 02",
"type": "IpAddress"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "IpAddress 03",
"type": "IpAddress"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "IpAddress 04",
"type": "IpAddress"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "IpAddress 05",
"type": "IpAddress"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "IpAddress 06",
"type": "IpAddress"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "IpAddress 07",
"type": "IpAddress"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "IpAddress 08",
"type": "IpAddress"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "IpAddress 09",
"type": "IpAddress"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "IpAddress 10",
"type": "IpAddress"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "IpAddress 11",
"type": "IpAddress"
}
}
]
},
{
"type": "RelatedInternetDomainName",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "InternetDomainName 00",
"type": "InternetDomainName"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "InternetDomainName 01",
"type": "InternetDomainName"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "InternetDomainName 02",
"type": "InternetDomainName"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "InternetDomainName 03",
"type": "InternetDomainName"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "InternetDomainName 04",
"type": "InternetDomainName"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "InternetDomainName 05",
"type": "InternetDomainName"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "InternetDomainName 06",
"type": "InternetDomainName"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "InternetDomainName 07",
"type": "InternetDomainName"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "InternetDomainName 08",
"type": "InternetDomainName"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "InternetDomainName 09",
"type": "InternetDomainName"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "InternetDomainName 10",
"type": "InternetDomainName"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "InternetDomainName 11",
"type": "InternetDomainName"
}
}
]
},
{
"type": "RelatedProduct",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Product 00",
"type": "Product"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Product 01",
"type": "Product"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Product 02",
"type": "Product"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Product 03",
"type": "Product"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Product 04",
"type": "Product"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Product 05",
"type": "Product"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Product 06",
"type": "Product"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Product 07",
"type": "Product"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Product 08",
"type": "Product"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Product 09",
"type": "Product"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Product 10",
"type": "Product"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Product 11",
"type": "Product"
}
}
]
},
{
"type": "RelatedCountries",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Countries 00",
"type": "Countries"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Countries 01",
"type": "Countries"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Countries 02",
"type": "Countries"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Countries 03",
"type": "Countries"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Countries 04",
"type": "Countries"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Countries 05",
"type": "Countries"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Countries 06",
"type": "Countries"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Countries 07",
"type": "Countries"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Countries 08",
"type": "Countries"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Countries 09",
"type": "Countries"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Countries 10",
"type": "Countries"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Countries 11",
"type": "Countries"
}
}
]
},
{
"type": "RelatedHash",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Hash 00",
"type": "Hash"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Hash 01",
"type": "Hash"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Hash 02",
"type": "Hash"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Hash 03",
"type": "Hash"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Hash 04",
"type": "Hash"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Hash 05",
"type": "Hash"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Hash 06",
"type": "Hash"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Hash 07",
"type": "Hash"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Hash 08",
"type": "Hash"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Hash 09",
"type": "Hash"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Hash 10",
"type": "Hash"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Hash 11",
"type": "Hash"
}
}
]
},
{
"type": "RelatedTechnology",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Technology 00",
"type": "Technology"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Technology 01",
"type": "Technology"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Technology 02",
"type": "Technology"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Technology 03",
"type": "Technology"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Technology 04",
"type": "Technology"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Technology 05",
"type": "Technology"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Technology 06",
"type": "Technology"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Technology 07",
"type": "Technology"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Technology 08",
"type": "Technology"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Technology 09",
"type": "Technology"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Technology 10",
"type": "Technology"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Technology 11",
"type": "Technology"
}
}
]
},
{
"type": "RelatedEmailAddress",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "EmailAddress 00",
"type": "EmailAddress"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "EmailAddress 01",
"type": "EmailAddress"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "EmailAddress 02",
"type": "EmailAddress"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "EmailAddress 03",
"type": "EmailAddress"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "EmailAddress 04",
"type": "EmailAddress"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "EmailAddress 05",
"type": "EmailAddress"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "EmailAddress 06",
"type": "EmailAddress"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "EmailAddress 07",
"type": "EmailAddress"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "EmailAddress 08",
"type": "EmailAddress"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "EmailAddress 09",
"type": "EmailAddress"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "EmailAddress 10",
"type": "EmailAddress"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "EmailAddress 11",
"type": "EmailAddress"
}
}
]
},
{
"type": "RelatedAttackVector",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "AttackVector 00",
"type": "AttackVector"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "AttackVector 01",
"type": "AttackVector"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "AttackVector 02",
"type": "AttackVector"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "AttackVector 03",
"type": "AttackVector"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "AttackVector 04",
"type": "AttackVector"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "AttackVector 05",
"type": "AttackVector"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "AttackVector 06",
"type": "AttackVector"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "AttackVector 07",
"type": "AttackVector"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "AttackVector 08",
"type": "AttackVector"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "AttackVector 09",
"type": "AttackVector"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "AttackVector 10",
"type": "AttackVector"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "AttackVector 11",
"type": "AttackVector"
}
}
]
},
{
"type": "RelatedMalwareCategory",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "MalwareCategory 00",
"type": "MalwareCategory"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "MalwareCategory 01",
"type": "MalwareCategory"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "MalwareCategory 02",
"type": "MalwareCategory"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "MalwareCategory 03",
"type": "MalwareCategory"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "MalwareCategory 04",
"type": "MalwareCategory"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "MalwareCategory 05",
"type": "MalwareCategory"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "MalwareCategory 06",
"type": "MalwareCategory"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "MalwareCategory 07",
"type": "MalwareCategory"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "MalwareCategory 08",
"type": "MalwareCategory"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "MalwareCategory 09",
"type": "MalwareCategory"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "MalwareCategory 10",
"type": "MalwareCategory"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "MalwareCategory 11",
"type": "MalwareCategory"
}
}
]
},
{
"type": "RelatedOperations",
"entities": [
{
"count": 11,
"entity": {
"id": "dummy",
"name": "Operations 00",
"type": "Operations"
}
},
{
"count": 10,
"entity": {
"id": "dummy",
"name": "Operations 01",
"type": "Operations"
}
},
{
"count": 9,
"entity": {
"id": "dummy",
"name": "Operations 02",
"type": "Operations"
}
},
{
"count": 8,
"entity": {
"id": "dummy",
"name": "Operations 03",
"type": "Operations"
}
},
{
"count": 7,
"entity": {
"id": "dummy",
"name": "Operations 04",
"type": "Operations"
}
},
{
"count": 6,
"entity": {
"id": "dummy",
"name": "Operations 05",
"type": "Operations"
}
},
{
"count": 5,
"entity": {
"id": "dummy",
"name": "Operations 06",
"type": "Operations"
}
},
{
"count": 4,
"entity": {
"id": "dummy",
"name": "Operations 07",
"type": "Operations"
}
},
{
"count": 3,
"entity": {
"id": "dummy",
"name": "Operations 08",
"type": "Operations"
}
},
{
"count": 2,
"entity": {
"id": "dummy",
"name": "Operations 09",
"type": "Operations"
}
},
{
"count": 1,
"entity": {
"id": "dummy",
"name": "Operations 10",
"type": "Operations"
}
},
{
"count": 0,
"entity": {
"id": "dummy",
"name": "Operations 11",
"type": "Operations"
}
}
]
}
],
"riskyCIDRIPs": [
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 60
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 17
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 46
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 35
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 59
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 77
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 49
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 15
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 93
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 24
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 52
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 13
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 7
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 95
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 67
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 50
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 9
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 40
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 33
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 65
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 84
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 69
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 85
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 37
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 88
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 68
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 11
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 86
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 58
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 62
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 81
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 30
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 56
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 36
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 19
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 51
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 99
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 25
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 28
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 80
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 23
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 54
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 74
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 78
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 42
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 66
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 98
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 75
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 61
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 71
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 83
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 97
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 29
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 87
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 90
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 26
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 14
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 72
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 32
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 57
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 45
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 2
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 31
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 82
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 48
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 34
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 44
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 3
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 47
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 43
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 63
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 55
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 22
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 16
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 91
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 73
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 79
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 6
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 27
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 5
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 64
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 89
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 92
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 76
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 21
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 39
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 94
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 1
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 20
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 18
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 4
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 12
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 53
},
{
"ip": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"score": 41
},
{
"ip": {
"id": "ip:1.2.3.1",
"name": "1.2.3.1",
"type": "IpAddress"
},
"score": 10
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 70
},
{
"ip": {
"id": "ip:1.2.3.2",
"name": "1.2.3.2",
"type": "IpAddress"
},
"score": 8
},
{
"ip": {
"id": "ip:1.2.3.3",
"name": "1.2.3.3",
"type": "IpAddress"
},
"score": 38
}
],
"metrics": [
{
"type": "pasteHits",
"value": 2
},
{
"type": "darkWebHits",
"value": 2
},
{
"type": "trendVolume",
"value": 2
},
{
"type": "criticality",
"value": 2
},
{
"type": "linkedIntrusion",
"value": 2
},
{
"type": "recentLinkedIntrusion",
"value": 2
},
{
"type": "undergroundForumHits",
"value": 2
},
{
"type": "maliciousHits",
"value": 2
},
{
"type": "technicalReportingHits",
"value": 2
},
{
"type": "infoSecHits",
"value": 2
},
{
"type": "totalHits",
"value": 42
},
{
"type": "sixtyDaysHits",
"value": 2
},
{
"type": "defanged",
"value": 2
},
{
"type": "oneDayHits",
"value": 2
},
{
"type": "recentDefanged",
"value": 2
},
{
"type": "socialMediaHits",
"value": 2
},
{
"type": "sevenDaysHits",
"value": 2
}
],
"location": {
"asn": "AS11111",
"location": {
"continent": "Europe",
"city": "Gothenburg",
"country": "Sweden"
},
"cidr": {
"id": "ip:1.2.3.0/25",
"name": "1.2.3.0/25",
"type": "IpAddress"
},
"organization": "Recorded Future"
}
}
}
IP_REP = {
"data": {
"results": [
{
"entity": {
"id": "ip:1.2.3.4",
"name": "1.2.3.4",
"type": "IpAddress"
},
"risk": {
"level": 4,
"rule": {
"count": 10,
"evidence": {
"honeypot": {
"timestamp": "2016-10-13T10:34:01.000Z",
"description": "23 sightings on 9 sources "
"including: @atma_es, @Webiron"
"Bots, @Chester250, @olaf_j, @E"
"IS_BFB. Most recent tweet: Sus"
"picious Activity Captured Fro"
"m: 1.2.3.4 on port 123 #"
"HITME. Most recent link (Oct 1"
"3, 2016): https://twitter.com/"
"HoneyPoint/statuses/7865153291"
"66020608",
"rule": "Historical Honeypot Sighting",
"mitigation": "",
"level": 1
},
"linkedIntrusion": {
"timestamp": "2016-08-26T19:06:14.000Z",
"description": "12 sightings on 4 sources: Rev"
"ersingLabs, @olaf_j, @EIS_BFB,"
" @monitor1125. 3 related intru"
"sion methods: Brute Force Atta"
"ck, Brute Force Blocking (BFB"
"), Trojan.Gafgyt. Most recent "
"tweet: Masterdeb1 BFB-attack d"
"etected from 1.2.3.4 to "
"APACHE Accesslistscan on 26.08"
".2016 21:06:12. Most recent l"
"ink (Aug 26, 2016): https://t"
"witter.com/olaf_j/statuses/76"
"9249615615000576",
"rule": "Historically Linked to "
"Intrusion Method",
"mitigation": "",
"level": 1
},
"recentCncServer": {
"timestamp": "2019-11-25T20:56:19.230Z",
"description": "1 sighting on 1 source: Cobalt"
" Strike Default Certificate"
" Detected - Shodan / Record"
"ed Future.",
"rule": "Current C&C Server",
"mitigation": "",
"level": 4
},
"recentDefanged": {
"timestamp": "2019-11-21T15:58:45.000Z",
"description": "1 sighting on 1 source: @Jas"
"onMilletary. Most recent twe"
"et: C2Server: 37.48.83[.]137"
",/pixel.gif Path: /submit.ph"
"p User Agent: Mozilla/4.0 (c"
"ompatible; MSIE 8.0; Windows"
" NT 5.1; Trident/4.0). Most "
"recent link (Nov 21, 2019): "
"https://twitter.com/JasonMil"
"letary/statuses/119754489002"
"1928961",
"rule": "Recently Reported as a Defanged IP",
"mitigation": "",
"level": 2
},
"recentActiveCnc": {
"timestamp": "2019-11-25T20:56:19.234Z",
"description": "1 sighting on 1 source: Recor"
"ded Future Network Traffic An"
"alysis. Communication observ"
"ed on TCP:443. Last observed"
" on Nov 22, 2019.",
"rule": "Actively Communicating C&C Server",
"mitigation": "",
"level": 4
},
"linkedToCyberAttack": {
"timestamp": "2016-10-13T10:34:01.000Z",
"description": "7 sightings on 4 sources: @a"
"tma_es, @olaf_j, @EIS_BFB, @"
"HoneyPoint. Most recent twee"
"t: Suspicious Activity Captu"
"red From: 1.2.3.4 on po"
"rt 123 #HITME. Most recent li"
"nk (Oct 13, 2016): https://t"
"witter.com/HoneyPoint/status"
"es/786515329166020608",
"rule": "Historically Linked to Cyber Attack",
"mitigation": "",
"level": 1
}
},
"maxCount": 51
},
"score": 99
}
},
{
"entity": {
"id": "ip:8.8.8.8",
"name": "8.8.8.8",
"type": "IpAddress"
},
"risk": {
"level": 4,
"rule": {
"count": 10,
"evidence": {
"honeypot": {
"timestamp": "2016-10-13T10:34:01.000Z",
"description": "23 sightings on 9 sources "
"including: @atma_es, @Webiron"
"Bots, @Chester250, @olaf_j, @E"
"IS_BFB. Most recent tweet: Sus"
"picious Activity Captured Fro"
"m: 8.8.8.8 on port 123 #"
"HITME. Most recent link (Oct 1"
"3, 2016): https://twitter.com/"
"HoneyPoint/statuses/7865153291"
"66020608",
"rule": "Historical Honeypot Sighting",
"mitigation": "",
"level": 1
},
"linkedIntrusion": {
"timestamp": "2016-08-26T19:06:14.000Z",
"description": "12 sightings on 4 sources: Rev"
"ersingLabs, @olaf_j, @EIS_BFB,"
" @monitor1125. 3 related intru"
"sion methods: Brute Force Atta"
"ck, Brute Force Blocking (BFB"
"), Trojan.Gafgyt. Most recent "
"tweet: Masterdeb1 BFB-attack d"
"etected from 8.8.8.8 to "
"APACHE Accesslistscan on 26.08"
".2016 21:06:12. Most recent l"
"ink (Aug 26, 2016): https://t"
"witter.com/olaf_j/statuses/76"
"9249615615000576",
"rule": "Historically Linked to "
"Intrusion Method",
"mitigation": "",
"level": 1
},
"recentCncServer": {
"timestamp": "2019-11-25T20:56:19.230Z",
"description": "1 sighting on 1 source: Cobalt"
" Strike Default Certificate"
" Detected - Shodan / Record"
"ed Future.",
"rule": "Current C&C Server",
"mitigation": "",
"level": 4
},
"recentDefanged": {
"timestamp": "2019-11-21T15:58:45.000Z",
"description": "1 sighting on 1 source: @Jas"
"onMilletary. Most recent twe"
"et: C2Server: 37.48.83[.]137"
",/pixel.gif Path: /submit.ph"
"p User Agent: Mozilla/4.0 (c"
"ompatible; MSIE 8.0; Windows"
" NT 5.1; Trident/4.0). Most "
"recent link (Nov 21, 2019): "
"https://twitter.com/JasonMil"
"letary/statuses/119754489002"
"1928961",
"rule": "Recently Reported as a Defanged IP",
"mitigation": "",
"level": 2
},
"recentActiveCnc": {
"timestamp": "2019-11-25T20:56:19.234Z",
"description": "1 sighting on 1 source: Recor"
"ded Future Network Traffic An"
"alysis. Communication observ"
"ed on TCP:443. Last observed"
" on Nov 22, 2019.",
"rule": "Actively Communicating C&C Server",
"mitigation": "",
"level": 4
},
"linkedToCyberAttack": {
"timestamp": "2016-10-13T10:34:01.000Z",
"description": "7 sightings on 4 sources: @a"
"tma_es, @olaf_j, @EIS_BFB, @"
"HoneyPoint. Most recent twee"
"t: Suspicious Activity Captu"
"red From: 8.8.8.8 on po"
"rt 123 #HITME. Most recent li"
"nk (Oct 13, 2016): https://t"
"witter.com/HoneyPoint/status"
"es/786515329166020608",
"rule": "Historically Linked to Cyber Attack",
"mitigation": "",
"level": 1
}
},
"maxCount": 51
},
"score": 99
}
}
]
},
"counts": {
"returned": 1,
"total": 1
}
}
TRIAGE = {
"entities": [
{
"name": "8.8.8.8",
"score": 0,
"rule": {
"count": 0,
"evidence": [],
"maxCount": 1
},
"id": "ip:8.8.8.8",
"type": "IpAddress"
},
{
"name": "https://sites.google.com/site/unblockingnotice/",
"score": 0,
"rule": {
"count": 2,
"evidence": [],
"maxCount": 3
},
"id": "url:https://sites.google.com/site/unblockingnotice/",
"type": "URL"
},
{
"name": "CVE-2020-8813",
"score": 79,
"rule": {
"count": 4,
"evidence": [],
"maxCount": 22
},
"id": "c5zWtD",
"type": "CyberVulnerability"
},
{
"name": "fa964842244e752950fd4ed711759382a8950e"
"13cc2794d6f73ab7eb9169e5ee",
"score": 77,
"rule": {
"count": 4,
"evidence": [],
"maxCount": 12
},
"id": "hash:fa964842244e752950fd4ed711759382a895"
"0e13cc2794d6f73ab7eb9169e5ee",
"type": "Hash"
},
{
"name": "www.feddoctor.com",
"score": 0,
"rule": {
"count": 3,
"evidence": [],
"maxCount": 2
},
"id": "idn:www.feddoctor.com",
"type": "InternetDomainName"
},
{
"name": "CVE-2011-3874",
"score": 99,
"rule": {
"count": 5,
"evidence": [],
"maxCount": 22
},
"id": "KIHnRI",
"type": "CyberVulnerability"
},
{
"name": "1.1.1.1",
"score": 0,
"rule": {
"count": 0,
"evidence": [],
"maxCount": 1
},
"id": "ip:1.1.1.1",
"type": "IpAddress"
}
],
"threshold_type": "max",
"context": "phishing",
"verdict": False,
"threshold": 65,
"scores": {
"max": 0,
"min": 0
}
}
ALERT_RULES = {
"data": {
"results": [
{
"title": "Third-Party Risk, Trend",
"id": "d071-i"
},
{
"title": "Infrastructure and Brand Risk, Potential "
"Typosquatting Watch List Domains",
"id": "dmQn3r"
},
{
"title": "Company Email on Code Repository",
"id": "dlYHpI"
},
{
"title": "Merchants and POS, Trending Targets in"
" Merchant & POS",
"id": "dhfdl-"
},
{
"title": "Target Trends, Trending Targets in Watch List",
"id": "dbWEXt"
},
{
"title": "Possible Fraud related to COVID-19",
"id": "dQ5lJN"
},
{
"title": "COVID-19 linked Cyber Attacks (Social Media)",
"id": "dRDebc"
},
{
"title": "COVID-19 Suspicious Domain Registrations",
"id": "dRDebb"
},
{
"title": "COVID-19 linked Cyber Attacks (non-Social Media)",
"id": "dRDeba"
},
{
"title": "COVID-19 Insikt Group Reporting",
"id": "dQTh2T"
}
]
},
"counts": {
"returned": 10,
"total": 101
}
}
ALERTS = {
"data": {
"results": [
{
"review": {
"assignee": "None",
"noteAuthor": "None",
"note": "None",
"status": "no-action",
"noteDate": "None"
},
"url": "https://app.recordedfuture.com/live/sc/notification/"
"?id=d6bWi2",
"rule": {
"url": "https://app.recordedfuture.com/live/sc/ViewIdkobra"
"_view_report_item_alert_editor?view_opts=%7B%22rep"
"ortId%22%3A%22dQTh2T%22%2C%22bTitle%22%3Atrue%2C%"
"22title%22%3A%22COVID-19+Insikt+Group+Reporting%2"
"2%7D&state.bNavbar=false",
"name": "COVID-19 Insikt Group Reporting",
"id": "dQTh2T"
},
"triggered": "2020-05-22T10:36:35.203Z",
"id": "d6bWi2",
"title": "COVID-19 Insikt Group Reporting - New reference "
"in 1 document",
"type": "REFERENCE"
},
{
"review": {
"assignee": "None",
"noteAuthor": "None",
"note": "None",
"status": "no-action",
"noteDate": "None"
},
"url": "https://app.recordedfuture.com/live/sc/notification/"
"?id=d6a-48",
"rule": {
"url": "https://app.recordedfuture.com/live/sc/ViewIdkobra"
"_view_report_item_alert_editor?view_opts=%7B%22rep"
"ortId%22%3A%22dRDebb%22%2C%22bTitle%22%3Atrue%2C%2"
"2title%22%3A%22COVID-19+Suspicious+Domain+Registra"
"tions%22%7D&state.bNavbar=false",
"name": "COVID-19 Suspicious Domain Registrations",
"id": "dRDebb"
},
"triggered": "2020-05-22T10:18:34.231Z",
"id": "d6a-48",
"title": "COVID-19 Suspicious Domain Registrations - "
"New references in 60 documents",
"type": "REFERENCE"
},
{
"review": {
"assignee": "None",
"noteAuthor": "None",
"note": "None",
"status": "no-action",
"noteDate": "None"
},
"url": "https://app.recordedfuture.com/live/sc/notification/"
"?id=d6anxr",
"rule": {
"url": "https://app.recordedfuture.com/live/sc/ViewIdkobr"
"a_view_report_item_alert_editor?view_opts=%7B%22r"
"eportId%22%3A%22Z7VJ3f%22%2C%22bTitle%22%3Atrue%2"
"C%22title%22%3A%22Brand+Mentions+on+Non-Mainstrea"
"m+Sources%22%7D&state.bNavbar=false",
"name": "Brand Mentions on Non-Mainstream Sources",
"id": "Z7VJ3f"
},
"triggered": "2020-05-22T10:17:41.563Z",
"id": "d6anxr",
"title": "Brand Mentions on Non-Mainstream Sources - New "
"references in 48 documents",
"type": "EVENT"
}
]
},
"counts": {
"returned": 10,
"total": 18252
}
}
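# The dictionaries above are canned API responses intended for unit tests.
# A minimal sanity-check sketch (the test function below is illustrative and
# not part of the original fixtures):
def test_alert_rules_fixture_shape():
    results = ALERT_RULES["data"]["results"]
    # The fixture's "returned" count should match the embedded result list.
    assert ALERT_RULES["counts"]["returned"] == len(results)
    assert all("id" in rule and "title" in rule for rule in results)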
| 34.367002 | 79 | 0.219008 |
9611c4e388d04c36da3bcf08040fb39d9b0a107d | 1,169 | py | Python | Sketchbots/sw/labqueue/lask/server/http/default_handler.py | rlugojr/ChromeWebLab | 60f964b3f283c15704b7a04b7bb50cb15791e2e4 | ["Apache-2.0"] | 306 | 2015-01-09T14:03:44.000Z | 2017-09-16T13:03:35.000Z | Sketchbots/sw/labqueue/lask/server/http/default_handler.py | rlugojr/ChromeWebLab | 60f964b3f283c15704b7a04b7bb50cb15791e2e4 | ["Apache-2.0"] | 90 | 2019-03-26T05:36:00.000Z | 2021-07-28T05:30:16.000Z | Sketchbots/sw/labqueue/lask/server/http/default_handler.py | rlugojr/ChromeWebLab | 60f964b3f283c15704b7a04b7bb50cb15791e2e4 | ["Apache-2.0"] | 119 | 2015-01-26T15:04:33.000Z | 2017-09-13T09:30:53.000Z |
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Part of Lask, the Web Lab Task management system.
Handlers for the LASK HTTP server.
"""
import webapp2
import config
from support.handlers import JSONResponseRPCHandler
#####################################################################
#
# This is the basis for most HTTP handlers in the system
#
class DefaultHandler(JSONResponseRPCHandler):
""" The base for most handlers in the system
"""
_enable_help = config.HTTP_HELP
_raise_exceptions = config.HTTP_RAISE_EXCEPTIONS_IN_REQUESTS
cors_allow_patterns = [
r"*",
]
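# Hypothetical usage sketch (not part of this module): a concrete handler that
# subclasses DefaultHandler and is mounted in a webapp2 application. The route
# and PingHandler below are assumptions, and the sketch assumes that
# JSONResponseRPCHandler ultimately derives from webapp2.RequestHandler.
class PingHandler(DefaultHandler):
    def get(self):
        # Plain webapp2 response used to keep the sketch self-contained.
        self.response.write('{"ok": true}')

app = webapp2.WSGIApplication([
    (r'/ping', PingHandler),
], debug=False)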
| 29.974359 | 77 | 0.682635 |
27f8e20a91bea0381e6664f8527a33044c90336c | 3,122 | py | Python | flask_melodramatiq/__init__.py | epandurski/flask_melodramatiq | f4d95c7efa05050d84ea26bdb4074195ad1ae4e6 | ["MIT"] | 9 | 2019-03-22T15:58:32.000Z | 2022-03-27T18:27:44.000Z | flask_melodramatiq/__init__.py | epandurski/flask_melodramatiq | f4d95c7efa05050d84ea26bdb4074195ad1ae4e6 | ["MIT"] | 3 | 2020-01-30T16:11:03.000Z | 2020-02-07T15:53:19.000Z | flask_melodramatiq/__init__.py | epandurski/flask_melodramatiq | f4d95c7efa05050d84ea26bdb4074195ad1ae4e6 | ["MIT"] | 1 | 2020-02-01T09:35:30.000Z | 2020-02-01T09:35:30.000Z |
import importlib
import functools
import dramatiq
from flask_melodramatiq.lazy_broker import (
LAZY_BROKER_DOCSTRING_TEMPLATE,
register_broker_class,
LazyActor,
LazyBrokerMixin,
Broker,
missing,
)
from flask_melodramatiq.rabbitmq import RabbitmqBrokerMixin
__all__ = ['create_broker_class', 'Broker', 'RabbitmqBroker', 'RedisBroker', 'StubBroker']
def create_broker_class(classpath, *, classname=None, docstring=None, mixins=()):
"""Create a new lazy broker class that wraps an existing broker class.
:param classpath: A module path to the existing broker class. For
example: ``"dramatiq.brokers.rabbitmq:RabbitmqBroker"``.
:param classname: Optional name for the new class. If not given,
the class name specified in **classpath** will be used.
:param docstring: Optional documentation string for the new class
:param mixins: Optional additional mix-in classes
:type mixins: tuple(type)
:return: The created lazy broker class
Example::
from flask_melodramatiq import create_broker_class
PostgresBroker = create_broker_class('dramatiq_pg:PostgresBroker')
"""
modname, varname = classpath.split(':', maxsplit=1)
classname = classname or varname
try:
module = importlib.import_module(modname)
except ImportError as e:
# We will raise this exact import error when the class is
# instantiated by the user.
raise_import_error = functools.partial(raise_error, e)
broker_class = type(classname, mixins + (Broker,), dict(
__init__=raise_import_error,
__doc__=docstring,
_dramatiq_broker_factory=raise_import_error,
))
else:
superclass = getattr(module, varname)
broker_class = type(classname, mixins + (LazyBrokerMixin, superclass), dict(
__doc__=docstring,
_dramatiq_broker_factory=superclass,
))
register_broker_class(broker_class)
return broker_class
def raise_error(e, *args, **kwargs):
raise e
# We change the default actor class used by the `dramatiq.actor`
# decorator to `LazyActor`. This should be safe because for regular
# brokers and "init_app"-ed lazy brokers `LazyActor` behaves exactly
# as `dramatiq.Actor`.
dramatiq.actor.__kwdefaults__['actor_class'] = LazyActor
RabbitmqBroker = create_broker_class(
classpath='dramatiq.brokers.rabbitmq:RabbitmqBroker',
docstring=LAZY_BROKER_DOCSTRING_TEMPLATE.format(
description='A lazy broker wrapping a :class:`~dramatiq.brokers.rabbitmq.RabbitmqBroker`.\n',
),
mixins=(RabbitmqBrokerMixin,)
)
RedisBroker = create_broker_class(
classpath='dramatiq.brokers.redis:RedisBroker',
docstring=LAZY_BROKER_DOCSTRING_TEMPLATE.format(
description='A lazy broker wrapping a :class:`~dramatiq.brokers.redis.RedisBroker`.\n',
),
)
StubBroker = create_broker_class(
classpath='dramatiq.brokers.stub:StubBroker',
docstring=LAZY_BROKER_DOCSTRING_TEMPLATE.format(
description='A lazy broker wrapping a :class:`~dramatiq.brokers.stub.StubBroker`.\n',
),
)
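# Usage sketch (not part of this module; exact constructor keywords may differ).
# A lazy broker binds itself to a Flask app, either at construction time or via
# init_app() from LazyBrokerMixin, and can then be installed as the default
# dramatiq broker:
#
#     from flask import Flask
#     app = Flask(__name__)
#     broker = RabbitmqBroker(app)   # or: broker = RabbitmqBroker(); broker.init_app(app)
#     dramatiq.set_broker(broker)
#
#     @dramatiq.actor
#     def ping():
#         print("pong")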
| 31.857143 | 101 | 0.71909 |
0194943567c2ee060609e59a31989ccb516896f6 | 1,171 | py | Python | app/cli.py | kiza054/woodhall-scout-blog-prototype | bc7dc0b766263bb7a1a4d342d27c57d7989ff152 | ["MIT"] | null | null | null | app/cli.py | kiza054/woodhall-scout-blog-prototype | bc7dc0b766263bb7a1a4d342d27c57d7989ff152 | ["MIT"] | null | null | null | app/cli.py | kiza054/woodhall-scout-blog-prototype | bc7dc0b766263bb7a1a4d342d27c57d7989ff152 | ["MIT"] | null | null | null |
import os
import click
def register(app):
@app.cli.group()
def translate():
"""Translation and localization commands."""
pass
@translate.command()
@click.argument('lang')
def init(lang):
"""Initialize a new language."""
if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
raise RuntimeError('extract command failed')
if os.system(
'pybabel init -i messages.pot -d app/translations -l ' + lang):
raise RuntimeError('init command failed')
os.remove('messages.pot')
@translate.command()
def update():
"""Update all languages."""
if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
raise RuntimeError('extract command failed')
if os.system('pybabel update -i messages.pot -d app/translations'):
raise RuntimeError('update command failed')
os.remove('messages.pot')
@translate.command()
def compile():
"""Compile all languages."""
if os.system('pybabel compile -d app/translations'):
raise RuntimeError('compile command failed')
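# Wiring sketch (not part of this module): register() is meant to be called
# from the application factory, after which the commands are available as
# `flask translate init <lang>`, `flask translate update` and
# `flask translate compile`. The import path below is an assumption based on
# this file living at app/cli.py:
#
#     from flask import Flask
#     from app import cli
#
#     app = Flask(__name__)
#     cli.register(app)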
| 34.441176 | 79 | 0.604611 |
2f4f31f04d7783f7bb4b0253e7f905bb0a41578e | 7,579 | py | Python | tests/test_shelving.py | smbambling/alerta | 1b3c3888b67ac4db48ef5eb9dcd704ac0c5aecb1 | ["Apache-2.0"] | null | null | null | tests/test_shelving.py | smbambling/alerta | 1b3c3888b67ac4db48ef5eb9dcd704ac0c5aecb1 | ["Apache-2.0"] | 60 | 2020-07-27T07:00:45.000Z | 2022-03-21T18:02:18.000Z | tests/test_shelving.py | smbambling/alerta | 1b3c3888b67ac4db48ef5eb9dcd704ac0c5aecb1 | ["Apache-2.0"] | 1 | 2020-11-24T03:16:49.000Z | 2020-11-24T03:16:49.000Z |
import json
import unittest
from alerta.app import create_app, db
from alerta.models.key import ApiKey
class ShelvingTestCase(unittest.TestCase):
def setUp(self):
test_config = {
'TESTING': True,
'AUTH_REQUIRED': True,
'CUSTOMER_VIEWS': True,
'PLUGINS': ['reject']
}
self.app = create_app(test_config)
self.client = self.app.test_client()
self.alert = {
'event': 'node_marginal',
'resource': 'node404',
'environment': 'Production',
'service': ['Network'],
'severity': 'warning',
'correlate': ['node_down', 'node_marginal', 'node_up']
}
with self.app.test_request_context('/'):
self.app.preprocess_request()
self.admin_api_key = ApiKey(
user='admin',
scopes=['admin', 'read', 'write'],
text='demo-key'
)
self.customer_api_key = ApiKey(
user='admin',
scopes=['admin', 'read', 'write'],
text='demo-key',
customer='Foo'
)
self.admin_api_key.create()
self.customer_api_key.create()
def tearDown(self):
db.destroy()
def test_alarm_shelving(self):
self.headers = {
'Authorization': 'Key %s' % self.admin_api_key.key,
'Content-type': 'application/json'
}
# new alert should be status=open
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'open')
alert_id = data['id']
# shelve alert
response = self.client.put('/alert/' + alert_id + '/status',
data=json.dumps({'status': 'shelved'}), headers=self.headers)
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id, headers=self.headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# duplicate alert should be status=shelved
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# duplicate alert should be status=shelved (again)
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# increase severity alert should stay status=shelved
self.alert['severity'] = 'major'
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# shelve alert
response = self.client.put('/alert/' + alert_id + '/status',
data=json.dumps({'status': 'shelved'}), headers=self.headers)
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id, headers=self.headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# decrease severity alert should be status=shelved
self.alert['severity'] = 'minor'
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# decrease severity alert should be status=shelved (again)
self.alert['severity'] = 'warning'
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# normal severity alert should be status=closed
self.alert['severity'] = 'ok'
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'closed')
# normal severity alert should be status=closed (again)
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'closed')
# normal severity alert should be status=closed
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'closed')
###
# increase severity alert should be status=shelved
self.alert['severity'] = 'critical'
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# shelve alert
response = self.client.put('/alert/' + alert_id + '/status',
data=json.dumps({'status': 'shelved'}), headers=self.headers)
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id, headers=self.headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# duplicate alert should be status=shelved
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'shelved')
# unshelve alert
response = self.client.put('/alert/' + alert_id + '/status',
data=json.dumps({'status': 'open'}), headers=self.headers)
self.assertEqual(response.status_code, 200)
response = self.client.get('/alert/' + alert_id, headers=self.headers)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'open')
# duplicate alert should be status=open
response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers)
self.assertEqual(response.status_code, 201)
data = json.loads(response.data.decode('utf-8'))
self.assertEqual(data['alert']['status'], 'open')
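    # Sketch (not part of the original test case): the repeated post-and-assert
    # pattern in test_alarm_shelving could be factored into a helper like this.
    def _post_alert(self, expected_status):
        response = self.client.post('/alert', data=json.dumps(self.alert),
                                    headers=self.headers)
        self.assertEqual(response.status_code, 201)
        data = json.loads(response.data.decode('utf-8'))
        self.assertEqual(data['alert']['status'], expected_status)
        return data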
| 44.321637 | 96 | 0.609843 |
c5a10e84c51ba837652874801a41f7c08a4a03f3 | 5,110 | py | Python | contrib/linearize/linearize-hashes.py | thelazier/dash | 22a24ab5b7b42d06a78d8fd092c3351bdf5aafdd | ["MIT"] | null | null | null | contrib/linearize/linearize-hashes.py | thelazier/dash | 22a24ab5b7b42d06a78d8fd092c3351bdf5aafdd | ["MIT"] | null | null | null | contrib/linearize/linearize-hashes.py | thelazier/dash | 22a24ab5b7b42d06a78d8fd092c3351bdf5aafdd | ["MIT"] | null | null | null |
#!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from http.client import HTTPConnection
import json
import re
import base64
import sys
import os
import os.path
settings = {}
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9998
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
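# Example CONFIG-FILE for this script (keys mirror the defaults handled above;
# every value is a placeholder, not a recommendation):
#
#     host=127.0.0.1
#     port=9998
#     rpcuser=alice
#     rpcpassword=changeme
#     min_height=0
#     max_height=313000
#     rev_hash_bytes=false
#
# Alternatively, set datadir=<path to the data directory> and the RPC
# credentials are read from its .cookie file via get_rpc_cookie().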
| 33.398693 | 108 | 0.607045 |
06c90d4a7bdcf2280f04668fcac33dc256e5d25a | 666 | py | Python | addons/blender-skeletal-motion-animate/core/retargeting.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | ["MIT"] | null | null | null | addons/blender-skeletal-motion-animate/core/retargeting.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | ["MIT"] | null | null | null | addons/blender-skeletal-motion-animate/core/retargeting.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | ["MIT"] | null | null | null |
import bpy
# This filters the objects shown to only include armatures and under certain conditions
def poll_source_armatures(self, obj):
return obj.type == 'ARMATURE' and obj.animation_data and obj.animation_data.action
def poll_target_armatures(self, obj):
return obj.type == 'ARMATURE' and obj != get_source_armature()
# If the retargeting armatures get changed, clear the bone list
def clear_bone_list(self, context):
context.scene.rsl_retargeting_bone_list.clear()
def get_source_armature():
return bpy.context.scene.rsl_retargeting_armature_source
def get_target_armature():
return bpy.context.scene.rsl_retargeting_armature_target
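# Registration sketch (illustrative only; the real add-on registers these
# properties elsewhere). The property names are the ones read by the getters
# above, while the name labels are assumptions:
def register_retargeting_properties():
    bpy.types.Scene.rsl_retargeting_armature_source = bpy.props.PointerProperty(
        name="Source Armature",
        type=bpy.types.Object,
        poll=poll_source_armatures,
        update=clear_bone_list,
    )
    bpy.types.Scene.rsl_retargeting_armature_target = bpy.props.PointerProperty(
        name="Target Armature",
        type=bpy.types.Object,
        poll=poll_target_armatures,
        update=clear_bone_list,
    )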
| 28.956522 | 87 | 0.791291 |
9f6c883dc22cb7623b6dc1e98aaf8b0e68c23d73 | 109 | py | Python | playground/optim/optimizer.py | rodrigobaron/nn-playground | d93b3eba3d54d7602e9adb5895cca10a1e047f2e | ["MIT"] | 2 | 2018-03-24T18:09:23.000Z | 2020-01-04T13:14:45.000Z | playground/optim/optimizer.py | rodrigobaron/nn-playground | d93b3eba3d54d7602e9adb5895cca10a1e047f2e | ["MIT"] | 1 | 2018-04-04T15:34:44.000Z | 2018-04-04T15:34:44.000Z | playground/optim/optimizer.py | rodrigobaron/nn-playground | d93b3eba3d54d7602e9adb5895cca10a1e047f2e | ["MIT"] | null | null | null |
class Optimizer:
"""TODO: Optimizer docs"""
def step(self, model):
raise NotImplementedError
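# A minimal concrete subclass (sketch only). It assumes `model` exposes an
# iterable `params` whose items carry `value` and `grad` arrays; that interface
# is assumed here, not defined in this file.
class SGD(Optimizer):
    """Plain stochastic gradient descent."""

    def __init__(self, lr=0.01):
        self.lr = lr

    def step(self, model):
        for p in model.params:
            p.value -= self.lr * p.grad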
| 21.8 | 33 | 0.651376 |
11d2d16cdb5c6124e3babc2ed9324b3eeef5b79d | 11,062 | py | Python | src/two/2_end_to_end.py | 21stio/python-handson-ml | 63f3628938109e28d3d9fb894207187ceeaf4c86 | ["Apache-2.0"] | null | null | null | src/two/2_end_to_end.py | 21stio/python-handson-ml | 63f3628938109e28d3d9fb894207187ceeaf4c86 | ["Apache-2.0"] | null | null | null | src/two/2_end_to_end.py | 21stio/python-handson-ml | 63f3628938109e28d3d9fb894207187ceeaf4c86 | ["Apache-2.0"] | null | null | null |
import hashlib
import os, tarfile, pandas as pd, numpy as np
import plotly.figure_factory as ff
import plotly.offline as py
from beeprint import pp
import cufflinks as cf
from numpy.polynomial import Polynomial
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder, LabelBinarizer, StandardScaler, MinMaxScaler, PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from two.transformers import CombinedAttributesAdder, CategoricalEncoder, IndexSelector, ColumnToIndexTransformer, Inspector, PassThrough
cf.set_config_file(offline=True, theme='pearl')
HOUSING_PATH = "../datasets/housing"
def load_housing_df(housing_path=HOUSING_PATH):
p = os.path.join(housing_path, "housing.csv")
df = pd.read_csv(p)
return df
def prep_housing_df(df):
def prep_index(df):
df.reset_index(inplace=True)
df["id"] = df["longitude"] * 1000 + df["latitude"]
return df
def prep_income_cat(df):
df["income_cat"] = np.ceil(df["median_income"] / 1.5)
df["income_cat"].where(df["income_cat"] < 5, 5.0, inplace=True)
return df
def prep_feature_combis(df):
df["rooms_per_household"] = df["total_rooms"] / df["households"]
df["bedrooms_per_room"] = df["total_bedrooms"] / df["total_rooms"]
df["bedrooms_per_household"] = df["total_bedrooms"] / df["households"]
df["population_per_household"] = df["population"] / df["households"]
return df
df = prep_index(df)
df = prep_income_cat(df)
df = prep_feature_combis(df)
return df
def get_num_df(df):
return df.drop("ocean_proximity", axis=1)
def clean_df(df):
def remove_nan_rows(df):
df.dropna(subset=["total_bedrooms"], inplace=True)
return df, np.nan
def remove_feature(df):
df.drop("total_bedrooms", axis=1, inplace=True)
return df, np.nan
def fill_nan(df):
m = df["total_bedrooms"].median()
df["total_bedrooms"].fillna(m, inplace=True)
return df, m
def impute_nan(df):
num_df = get_num_df(df)
imputer = Imputer(strategy="median")
imputer.fit(num_df)
X = imputer.transform(num_df)
new_df = pd.DataFrame(X, columns=df.columns)
return new_df, np.nan
return remove_nan_rows(df)
def encode_df(df):
def manual(df):
l_encoder = LabelEncoder()
housing_cat = df["ocean_proximity"]
housing_cat_encoded = l_encoder.fit_transform(housing_cat)
oh_encoder = OneHotEncoder()
housing_cat_1hot = oh_encoder.fit_transform(housing_cat_encoded.reshape(-1, 1))
return housing_cat_1hot
def auto(df):
housing_cat = df["ocean_proximity"]
encoder = LabelBinarizer(sparse_output=True)
housing_cat_1hot = encoder.fit_transform(housing_cat)
return housing_cat_1hot
return auto(df)
def visualize(df):
df.iplot(kind='histogram', bins=50, subplots=True, filename='/tmp/histogram-subplots.html', asPlot=True)
# df.scatter_matrix(filename='/tmp/scatter-matrix.html')
df.iplot(
kind="scatter",
x="longitude",
y="latitude",
filename='/tmp/loc.html',
asPlot=True,
)
fig = ff.create_scatterplotmatrix(df[["housing_median_age", "total_rooms", "median_income", "median_house_value", ]], diag='histogram', width=1000, height=1000)
py.plot(fig, filename='/tmp/scatterplotmatrix.html')
def inspect(df):
print("\n\nHEAD")
pp(df.head())
print("\n\nINFO")
pp(df.info())
print("\n\nINCOME_CAT_DIST")
pp(df["income_cat"].value_counts() / len(df))
print("\n\nCORR median_house_value")
corr_matrix = df.corr()
pp(corr_matrix["median_house_value"].sort_values(ascending=False))
def inspect_train_test_sets(train, test):
print(len(train), "train +", len(test), "test")
def split_train_test(df, test_ratio):
shuffled_indices = np.random.permutation(len(df))
test_set_size = int(len(df) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return df.iloc[train_indices], df.iloc[test_indices]
def test_set_check(identifier, test_ration, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ration
def split_train_test_by_id(df, test_ratio, id_column, hash=hashlib.md5):
ids = df[id_column]
in_test_set = ids.apply(lambda _id: test_set_check(_id, test_ratio, hash))
return df.loc[~in_test_set], df.loc[in_test_set]
def stratified_train_test_split(df):
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
train_sets = []
test_sets = []
for train_indices, test_indices in split.split(df, df["income_cat"]):
train_sets.append(df.loc[train_indices])
test_sets.append(df.loc[test_indices])
return train_sets, test_sets
def split(X, y):
# train_set, test_set = split_train_test(df, test_ratio=0.2)
# train_set, test_set = split_train_test_by_id(df, test_ratio=0.2, id_column="index")
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=42)
# train_sets, test_sets = stratified_train_test_split(df)
# train_set, test_set = train_sets[0], test_sets[0]
return train_X, test_X, train_y, test_y
def get_X_y(df, x_cols, y_cols):
X = df[x_cols].values
y = df[y_cols].values
return X, y
def get_X_pipeline(num_ix, cat_ix):
num_pipeline = Pipeline([
('selector', IndexSelector(num_ix)),
# ('inspector', Inspector('num_inspector')),
('imputer', Imputer(strategy='median')),
('attribs_adder', CombinedAttributesAdder()),
('poly', PolynomialFeatures(degree=1)),
# ('std_scaler', MinMaxScaler())
])
cat_pipeline = Pipeline([
('selector', IndexSelector(cat_ix)),
# ('inspector', Inspector('cat_inspector')),
('label_binarizer', CategoricalEncoder(encoding="onehot-dense")),
])
union_pipeline = FeatureUnion(transformer_list=[
('num_pipeline', num_pipeline),
('cat_pipeline', cat_pipeline),
])
p = Pipeline([
# ('inspector:before', Inspector('top_inspector:before')),
('union', union_pipeline),
# ('inspector:after', Inspector('top_inspector:after')),
])
return p
def get_y_pipeline():
p = Pipeline([
# ('std_scaler', MinMaxScaler()),
('pass_through', PassThrough())
])
return p
def evaluate_error(model, y_pipeline, train_X, test_X, train_y, test_y):
y_hat = model.predict(train_X).reshape(-1, 1)
# train_y = y_pipeline.inverse_transform(train_y)
# y_hat = y_pipeline.inverse_transform(y_hat)
train_rmse = mean_squared_error(train_y, y_hat) ** 0.5
print("train rmse: {}".format(train_rmse))
y_hat = model.predict(test_X).reshape(-1, 1)
# test_y = y_pipeline.inverse_transform(test_y)
# y_hat = y_pipeline.inverse_transform(y_hat)
test_rmse = mean_squared_error(test_y, y_hat) ** 0.5
print("test rmse: {}".format(test_rmse))
def predict(model, y_pipeline, X, y_true):
y_hat = model.predict(X).reshape(-1, 1)
print("y_hat: \n")
y_hat = y_pipeline.inverse_transform(y_hat)
print(y_hat)
print("y_true: \n")
y_true = y_pipeline.inverse_transform(y_true)
print(y_true)
def run():
housing_df = load_housing_df()
y_cols = ["median_house_value"]
x_cols = [x for x in list(housing_df.columns) if x not in y_cols]
cat_attribs = ["ocean_proximity"]
num_attribs = [x for x in x_cols if x not in cat_attribs]
X, y = get_X_y(housing_df, x_cols, y_cols)
x_cti_trans = ColumnToIndexTransformer(full_columns=list(x_cols))
cat_ix = x_cti_trans.transform(cat_attribs)
num_ix = x_cti_trans.transform(num_attribs)
train_X, test_X, train_y, test_y = split(X, y)
x_pipeline = get_X_pipeline(num_ix, cat_ix)
train_X = x_pipeline.fit_transform(train_X)
test_X = x_pipeline.transform(test_X)
y_pipeline = get_y_pipeline()
train_y = y_pipeline.fit_transform(train_y)
test_y = y_pipeline.transform(test_y)
model = RandomForestRegressor(warm_start=False, bootstrap=False, max_features=6, n_estimators=80)
def simple_evaluate(model, y_pipeline, train_X, test_X, train_y, test_y):
model.fit(train_X, train_y)
evaluate_error(model, y_pipeline, train_X, test_X, train_y, test_y)
predict(model, y_pipeline, x_pipeline.transform(X[[17606, 18632, 14650, 3230, 3555]]), y_pipeline.transform(y[[17606, 18632, 14650, 3230, 3555]]))
predict(model, y_pipeline, test_X[:5], test_y[:5])
def cross_evaluate(model, y_pipeline, train_X, test_X, train_y, test_y):
scores = cross_val_score(model, train_X, train_y, scoring="neg_mean_squared_error", cv=10)
rmse_scores = np.sqrt(-scores) # ** 0.5
def display_scores(scores):
print("Scores:", y_pipeline.inverse_transform(scores.reshape(-1, 1)))
print("Mean:", y_pipeline.inverse_transform([[scores.mean()]]))
print("Standard deviation:", y_pipeline.inverse_transform([[scores.std()]]))
display_scores(rmse_scores)
def grid_search(model, train_X, train_y):
from sklearn.model_selection import GridSearchCV
param_grid = [
{'n_estimators': [3, 10, 30, 50, 80], 'max_features': [2, 4, 6, 8, 10]},
{'bootstrap': [False], 'n_estimators': [3, 10, 30, 50, 80], 'max_features': [2, 4, 6, 8, 10]},
]
grid_search = GridSearchCV(model, param_grid, cv=5, n_jobs=os.cpu_count(), scoring="neg_mean_squared_error")
grid_search.fit(train_X, train_y)
pp(grid_search.best_params_)
pp(grid_search.best_estimator_)
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
def feature_importance(model, x_pipeline):
i = model.feature_importances_
extra_attrs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]
# encoder = get_pipeline_object(x_pipeline, ["union", "cat_pipeline", "label_binarizer"])
encoder = x_pipeline.get_params()["union__cat_pipeline__label_binarizer"]
one_hot_classes = list(np.array(encoder.categories_).ravel())
attrs = num_attribs + extra_attrs + one_hot_classes
pp(sorted(zip(i, attrs), reverse=True))
simple_evaluate(model, y_pipeline, train_X, test_X, train_y, test_y)
feature_importance(model, x_pipeline)
# cross_evaluate(model, y_pipeline, train_X, test_X, train_y, test_y)
# grid_search(model, train_X, train_y)
run()
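# Possible follow-up (not part of the original script): persist the fitted
# pipelines and model so they can be reused without retraining. run() would
# need to return x_pipeline, y_pipeline and model for this to work:
#
#     import joblib
#     joblib.dump(x_pipeline, "/tmp/housing_x_pipeline.pkl")
#     joblib.dump(y_pipeline, "/tmp/housing_y_pipeline.pkl")
#     joblib.dump(model, "/tmp/housing_model.pkl")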
| 29.656836 | 164 | 0.68107 |
b950ba4bebf129d370c31ee9554def70aaa744b8 | 1,521 | py | Python | llvmpy/src/Bitcode/ReaderWriter.py | KennethNielsen/llvmpy | 70c5957cfd10f1e32a44f28dcb9a4dc72d499c2e | ["BSD-3-Clause"] | 140 | 2015-01-07T20:58:12.000Z | 2022-01-21T17:02:21.000Z | llvmpy/src/Bitcode/ReaderWriter.py | pfalcon/llvmpy | 2338eae5f6a91651125b8180abafa8c9afaa90d8 | ["BSD-3-Clause"] | 19 | 2015-01-15T14:45:49.000Z | 2020-09-04T14:58:23.000Z | llvmpy/src/Bitcode/ReaderWriter.py | pfalcon/llvmpy | 2338eae5f6a91651125b8180abafa8c9afaa90d8 | ["BSD-3-Clause"] | 12 | 2015-01-12T01:49:32.000Z | 2020-07-10T22:30:38.000Z |
from binding import *
from ..namespace import llvm
from ..ADT.StringRef import StringRef
from ..Module import Module
from ..LLVMContext import LLVMContext
llvm.includes.add('llvm/Bitcode/ReaderWriter.h')
ParseBitCodeFile = llvm.CustomFunction('ParseBitCodeFile',
'llvm_ParseBitCodeFile',
PyObjectPtr, # returns Module*
cast(bytes, StringRef),
ref(LLVMContext),
PyObjectPtr, # file-like object
).require_only(2)
WriteBitcodeToFile = llvm.CustomFunction('WriteBitcodeToFile',
'llvm_WriteBitcodeToFile',
PyObjectPtr, # return None
ptr(Module),
PyObjectPtr, # file-like object
)
getBitcodeTargetTriple = llvm.CustomFunction('getBitcodeTargetTriple',
'llvm_getBitcodeTargetTriple',
PyObjectPtr, # return str
cast(str, StringRef),
ref(LLVMContext),
PyObjectPtr, # file-like object
).require_only(2)
| 49.064516 | 78 | 0.419461 |
d13f328dd58579a1081fa9816be95558e54a2f22 | 26,097 | py | Python | scripts/models.py | alan-turing-institute/memorization | 89bd1191f761528234543a69312284212628c24a | ["MIT"] | 2 | 2022-01-09T11:21:46.000Z | 2022-01-28T07:38:27.000Z | scripts/models.py | alan-turing-institute/memorization | 89bd1191f761528234543a69312284212628c24a | ["MIT"] | null | null | null | scripts/models.py | alan-turing-institute/memorization | 89bd1191f761528234543a69312284212628c24a | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Model definitions
Author: G.J.J. van den Burg
License: See LICENSE file.
Copyright: 2021, The Alan Turing Institute
"""
import abc
import math
import torch
import torch.nn as nn
from typing import Tuple
from torch.nn.functional import binary_cross_entropy
from torch.nn.functional import softplus
from torch.nn.functional import log_softmax
from constants import LOGIT_LAMBDA
class BaseModel(nn.Module, metaclass=abc.ABCMeta):
def __init__(self):
super().__init__()
self._device = "cpu"
@property
def device(self):
return self._device
def to(self, device):
super().to(device)
self._device = device
@abc.abstractmethod
def step(self, batch: torch.Tensor):
pass
@abc.abstractmethod
def loss_function(*args, **kwargs):
pass
class BaseVAE(BaseModel):
@property
def latent_dim(self):
return self._latent_dim
@abc.abstractproperty
def description(self):
"""Description of the model to store in the output files"""
def encode(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""x is expected to be of shape (batch_size, n_channel, height, width)"""
xx = self._encoder(x)
return self._mu(xx), self._logvar(xx)
def decode(self, z: torch.Tensor) -> torch.Tensor:
"""z is expected to be of shape (batch_size, latent_dim)"""
return self._decoder(z)
def reparameterize(
self, mu: torch.Tensor, logvar: torch.Tensor
) -> torch.Tensor:
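        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I),
        # which keeps the sample differentiable w.r.t. mu and logvar.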
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def forward(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
@abc.abstractmethod
def construct_model(self) -> None:
"""Create the encoder/decoder networks"""
@abc.abstractmethod
def sample(self, z: torch.Tensor) -> torch.Tensor:
"""Take a random sample from the decoder given the latent variable"""
@abc.abstractmethod
def log_pxz(self, pred: torch.Tensor, true: torch.Tensor) -> torch.Tensor:
"""Compute log p(x | z) where pred = decoder(z)"""
# Inputs assumed to be of shape (B, C, H, W)
# Output should be of shape (B,)
def push(self, z: torch.Tensor) -> torch.Tensor:
"""Push a batch of latent vectors through the network"""
return self.reconstruct(self.decode(z))
def reconstruct(self, y: torch.Tensor) -> torch.Tensor:
"""Reconstruct the output of the decoder if necessary"""
return y
def loss_function(
self,
true: torch.Tensor,
pred: torch.Tensor,
mu: torch.Tensor,
logvar: torch.Tensor,
) -> torch.Tensor:
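        # Negative ELBO: closed-form KL(q(z|x) || N(0, I)) for a diagonal
        # Gaussian posterior plus the negative reconstruction log-likelihood.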
logpxz = self.log_pxz(pred, true)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
REC = -logpxz.sum()
return REC + KLD
def step(self, batch: torch.Tensor) -> torch.Tensor:
"""Run a single step of the model and return the loss"""
x_pred, mu, logvar = self(batch)
loss = self.loss_function(batch, x_pred, mu, logvar)
return loss
class BernoulliMixin:
def log_pxz(self, pred: torch.Tensor, true: torch.Tensor) -> torch.Tensor:
"""Log p(x | z) for Bernoulli decoder"""
BCE = -binary_cross_entropy(pred, true, reduction="none")
BCE = BCE.sum(axis=(1, 2, 3))
return BCE
def sample(self, z: torch.Tensor) -> torch.Tensor:
y = self.decode(z)
return y.bernoulli_()
class DiagonalGaussianMixin:
def loss_function(
self,
true: torch.Tensor,
pred: torch.Tensor,
mu: torch.Tensor,
logvar: torch.Tensor,
) -> torch.Tensor:
logpxz = self.log_pxz_logit(pred, true)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
REC = -logpxz.sum()
return REC + KLD
def log_pxz(self, pred: torch.Tensor, true: torch.Tensor) -> torch.Tensor:
"""Log p(x | z) for Gaussian decoder with constant diagonal cov"""
D = self._in_channels * self._img_dim * self._img_dim
logpx_logit = self.log_pxz_logit(pred, true)
logpx_pixel = (
logpx_logit
+ D * torch.log(1 - 2 * torch.tensor(LOGIT_LAMBDA))
- D * torch.log(torch.tensor(256))
- torch.sum(true - 2 * softplus(true), axis=(1, 2, 3))
)
return logpx_pixel
def log_pxz_logit(
self, Y: torch.Tensor, true: torch.Tensor
) -> torch.Tensor:
"""Log p(x | z) for Gaussian decoder with diagonal cov. (logit space)"""
# Both the data and the model pretend that they're in logit space
# To get bits per dim, see eq (27) of
# https://arxiv.org/pdf/1705.07057.pdf and use log_pxz output
C = self._in_channels
D = self._in_channels * self._img_dim * self._img_dim
assert Y.shape[1] == 2 * C
mu_theta = Y[:, :C, :, :]
logvar_theta = Y[:, C:, :, :]
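        # Clamp the predicted log-variance from below so the Gaussian
        # likelihood cannot blow up when the decoder pushes the variance
        # toward zero.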
logvar_theta = torch.clamp(logvar_theta, min=-7.0)
inv_std = torch.exp(-0.5 * logvar_theta)
SSE = torch.sum(
torch.square(inv_std * (true - mu_theta)), axis=(1, 2, 3)
)
out = -0.5 * (
D * math.log(2 * math.pi) + logvar_theta.sum(axis=(1, 2, 3)) + SSE
)
return out
def reconstruct(self, y: torch.Tensor) -> torch.Tensor:
C = self._in_channels
return y[:, :C, :, :]
def sample(self, z: torch.Tensor) -> torch.Tensor:
C = self._in_channels
y = self.decode(z)
mu = y[:, :C, :, :]
logvar = y[:, C:, :, :]
logvar = torch.clamp(logvar, min=-7)
std = torch.exp(0.5 * logvar)
return mu + std * torch.randn_like(mu)
class ConstantGaussianMixin:
def loss_function(
self,
true: torch.Tensor,
pred: torch.Tensor,
mu: torch.Tensor,
logvar: torch.Tensor,
) -> torch.Tensor:
logpxz = self.log_pxz_logit(pred, true)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
REC = -logpxz.sum()
return REC + KLD
def log_pxz(self, pred: torch.Tensor, true: torch.Tensor) -> torch.Tensor:
"""Log p(x | z) for Gaussian decoder with constant diagonal cov"""
D = self._in_channels * self._img_dim * self._img_dim
logpx_logit = self.log_pxz_logit(pred, true)
logpx_pixel = (
logpx_logit
+ D * torch.log(1 - 2 * torch.tensor(LOGIT_LAMBDA))
- D * torch.log(torch.tensor(256))
- torch.sum(true - 2 * softplus(true), axis=(1, 2, 3))
)
return logpx_pixel
def log_pxz_logit(
self, Y: torch.Tensor, true: torch.Tensor
) -> torch.Tensor:
"""Log p(x | z) for Gaussian decoder with learned constant diagonal
cov. (logit space)"""
# Both the data and the model pretend that they're in logit space
# To get bits per dim, see eq (27) of
# https://arxiv.org/pdf/1705.07057.pdf and use log_pxz output
D = self._in_channels * self._img_dim * self._img_dim
mu_theta = Y
inv_gamma = torch.exp(-self.loggamma)
SSE = torch.sum(
inv_gamma * torch.square(true - mu_theta), axis=(1, 2, 3)
)
out = -0.5 * (D * math.log(2 * math.pi) + D * self.loggamma + SSE)
return out
def sample(self, z: torch.Tensor) -> torch.Tensor:
y = self.decode(z)
std = torch.exp(0.5 * self.loggamma)
return y + std * torch.randn_like(y)
class MixLogisticsMixin:
def log_pxz(self, pred: torch.Tensor, X: torch.Tensor) -> torch.Tensor:
"""Log p(x | z) for mixture of discrete logistics decoder"""
# pred is the output of the decoder
# This implementation is based on the original PixelCNN++ code and the
# NVAE code, as well as various online sources on this topic.
# -- Input validation --
K = self._num_mixture
B, M, H_, W_ = pred.shape
B, C, H, W = X.shape
assert C == 3 and H == H_ and W == W_ and M == 10 * K
assert X.min() >= 0.0 and X.max() <= 1.0
# -- Extract decoder output --
# pred has 10*K channels: log mixture components (K), means (3*K),
# log scales (3*K), alpha (K), beta (K), and gamma (K).
log_mixture_comp = pred[:, :K, :, :] # B, K, H, W
means = pred[:, K : 4 * K, :, :] # B, 3K, H, W
log_scales = pred[:, 4 * K : 7 * K, :, :] # B, 3K, H, W
alpha = pred[:, 7 * K : 8 * K, :, :] # B, K, H, W
beta = pred[:, 8 * K : 9 * K, :, :] # B, K, H, W
gamma = pred[:, 9 * K : 10 * K, :, :] # B, K, H, W
# -- Clipping and mapping --
# Map the X values to [-1, 1]
X = 2 * X - 1
# Clamp log scales to avoid exploding scale values
log_scales = log_scales.clamp(min=-7.0)
# Keep coefficients between -1 and +1
alpha = torch.tanh(alpha)
beta = torch.tanh(beta)
gamma = torch.tanh(gamma)
# -- Reconfigure for easier computation --
# Replicate X into another dimension
X = X.unsqueeze(4) # B, C, H, W, 1
X = X.expand(-1, -1, -1, -1, K) # B, C, H, W, K
X = X.permute(0, 1, 4, 2, 3) # B, C, K, H, W
# Reshape the means and logscales to match
means = means.view(B, C, K, H, W)
log_scales = log_scales.view(B, C, K, H, W)
# -- Compute the means for the different subpixels --
mean_r = means[:, 0, :, :, :] # B, 1, K, H, W
mean_g = means[:, 1, :, :, :] + alpha * X[:, 0, :, :, :]
mean_b = (
means[:, 2, :, :, :]
+ beta * X[:, 0, :, :, :]
+ gamma * X[:, 1, :, :, :]
)
# Combine means
mean_r = mean_r.unsqueeze(1)
mean_g = mean_g.unsqueeze(1)
mean_b = mean_b.unsqueeze(1)
means = torch.cat([mean_r, mean_g, mean_b], axis=1) # B, C, K, H, W
# Compute x - mu for each channel and mixture component
centered = X - means # B, C, K, H, W
# Compute inverse scale of logistics
inv_scale = torch.exp(-log_scales)
# Compute U_plus = (x + 1/2 - mu)/s and U_min = (x - 1/2 - mu)/s.
# Because x is in [-1, 1] instead of [0, 255], 1/2 becomes 1/255.
U_plus = inv_scale * (centered + 1.0 / 255.0)
U_min = inv_scale * (centered - 1.0 / 255.0)
# Apply sigmoid and compute difference (for non edge-case)
cdf_plus = torch.sigmoid(U_plus) # B, C, K, H, W
cdf_min = torch.sigmoid(U_min) # B, C, K, H, W
cdf_delta = cdf_plus - cdf_min # B, C, K, H, W
# -- Compute values for edge cases --
# For x = 0
log_cdf_plus = U_plus - softplus(U_plus)
# For x = 255
log_one_minus_cdf_min = -softplus(U_min)
# Midpoint fix. When cdf_delta is very small (here, smaller than 1e-5),
# the difference in CDF values is small. Thus, the small difference in
# CDF can be approximated by a derivative. Recall that for a CDF F(x)
# and a PDF f(x) we have lim_{t -> 0} (F(x + t) - F(x - t))/(2*t) =
# f(x). So here the PixelCNN++ authors approximate F(x + t) - F(x - t)
# by 2*t*f(x). And since we take logs and t = 1/255, we get log(2 *
# 1/255) = log(127.5) and log f(x). This gives for log f(x) (pdf of
# logistic distribution):
U_mid = inv_scale * centered
log_pdf_mid = U_mid - log_scales - 2.0 * softplus(U_mid)
        # -- Combine log probabilities --
# Compute safe (non-weighted) log prob for non edge-cases
# Note that clamp on log(cdf_delta) is needed for backprop (nan can
# occur)
log_prob_mid = torch.where(
cdf_delta > 1e-5,
torch.log(torch.clamp(cdf_delta, min=1e-10)),
log_pdf_mid - torch.log(torch.tensor(255.0 / 2)),
)
# Determine boundaries for edge cases
# NOTE: This differs slightly from other implementations, but
# corresponds to the theoretical values.
left = 0.5 / 255 * 2 - 1 # right boundary for x=0 on [-1, 1]
right = (255 - 0.5) / 255 * 2 - 1 # left boundary for x=255 on [-1, 1]
# Compute (non-weighted) log prob for all cases
log_prob = torch.where(
X < left,
log_cdf_plus,
torch.where(X > right, log_one_minus_cdf_min, log_prob_mid),
)
# Sum over channels (channel probs are multiplied, so log probs sum)
# and weight with mixture component weights (in log space, so we use
# log_softmax to ensure mixture_comp sums to 1).
log_prob = log_prob.sum(axis=1) + log_softmax(log_mixture_comp, dim=1)
# log prob is (B, K, H, W), so we logsumexp over everything but B
return torch.logsumexp(log_prob, dim=(1, 2, 3))
class BernoulliMLPVAE(BernoulliMixin, BaseVAE):
_layers = [512, 256]
def __init__(
self,
img_dim: int = 32,
latent_dim: int = 2,
in_channels: int = 1,
**kwargs,
):
super().__init__()
self._img_dim = img_dim
self._latent_dim = latent_dim
self._in_channels = in_channels
self.construct_model()
@property
def description(self):
layers = "-".join(map(str, self._layers))
latent = str(self._latent_dim)
d = f"{self.__class__.__name__}_{layers}-{latent}"
return d
def construct_model(self):
C = self._in_channels
D = self._img_dim
L = self._latent_dim
input_shape = D * D * C
encoder = [nn.Flatten()]
prev_dim = input_shape
for l in self._layers:
encoder.append(nn.Linear(prev_dim, l))
encoder.append(nn.ReLU(True))
prev_dim = l
self._encoder = nn.Sequential(*encoder)
self._mu = nn.Linear(prev_dim, L)
self._logvar = nn.Linear(prev_dim, L)
decoder = []
prev_dim = L
for l in reversed(self._layers):
decoder.append(nn.Linear(prev_dim, l))
decoder.append(nn.ReLU(True))
prev_dim = l
decoder.append(nn.Linear(prev_dim, input_shape))
decoder.append(nn.Sigmoid())
decoder.append(nn.Unflatten(1, (C, D, D)))
self._decoder = nn.Sequential(*decoder)
class BernoulliDCVAE(BernoulliMixin, BaseVAE):
def __init__(
self,
img_dim: int = 32,
latent_dim: int = 2,
in_channels: int = 1,
num_feature: int = 64,
):
super().__init__()
self._img_dim = img_dim
self._latent_dim = latent_dim
self._in_channels = in_channels
self._num_feature = num_feature
self.construct_model()
@property
def description(self):
d = f"{self.__class__.__name__}_NF{self._num_feature}-L{self._latent_dim}"
return d
def construct_model(self):
C = self._in_channels
D = self._img_dim
L = self._latent_dim
F = self._num_feature
# Model is designed for 32x32 input/output
assert D == 32
self._encoder = nn.Sequential(
nn.Conv2d(C, F, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F) x 16 x 16
nn.Conv2d(F, F * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F*2) x 8 x 8
nn.Conv2d(F * 2, F * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F*4) x 4 x 4
nn.Conv2d(F * 4, F * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size (F*8) x 2 x 2
# Flatten layer
nn.Flatten(),
)
prev_dim = int((F * 8) * 2 * 2)
self._mu = nn.Linear(prev_dim, L)
self._logvar = nn.Linear(prev_dim, L)
self._decoder = nn.Sequential(
# input is Z, going into a convolution
# NOTE: Using kernel_size = 2 here to get 32x32 output
nn.Unflatten(1, (L, 1, 1)),
nn.ConvTranspose2d(L, F * 8, 2, 1, 0, bias=False),
nn.BatchNorm2d(F * 8),
nn.ReLU(True),
# state size. (F*8) x 2 x 2
nn.ConvTranspose2d(F * 8, F * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 4),
nn.ReLU(True),
# state size. (F*4) x 4 x 4
nn.ConvTranspose2d(F * 4, F * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 2),
nn.ReLU(True),
# state size. (F*2) x 8 x 8
nn.ConvTranspose2d(F * 2, F, 4, 2, 1, bias=False),
nn.BatchNorm2d(F),
nn.ReLU(True),
# state size. (F) x 16 x 16
nn.ConvTranspose2d(F, C, 4, 2, 1, bias=False),
nn.Sigmoid(),
# state size. (C) x 32 x 32
)
class MixLogisticsDCVAE(MixLogisticsMixin, BaseVAE):
def __init__(
self,
img_dim: int = 32,
latent_dim: int = 2,
in_channels: int = 3,
num_feature: int = 32,
num_mixture: int = 5,
):
super().__init__()
if not in_channels == 3:
raise NotImplementedError
self._img_dim = img_dim
self._latent_dim = latent_dim
self._in_channels = in_channels
self._num_feature = num_feature
self._num_mixture = num_mixture
self.construct_model()
@property
def description(self):
d = f"{self.__class__.__name__}_NF{self._num_feature}-L{self._latent_dim}"
return d
def construct_model(self):
K = self._num_mixture
C = self._in_channels
D = self._img_dim
L = self._latent_dim
F = self._num_feature
# Model is designed for 32x32 input/output
assert D == 32
self._encoder = nn.Sequential(
nn.Conv2d(C, F, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F) x 16 x 16
nn.Conv2d(F, F * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F*2) x 8 x 8
nn.Conv2d(F * 2, F * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F*4) x 4 x 4
nn.Conv2d(F * 4, F * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size (F*8) x 2 x 2
# Flatten layer
nn.Flatten(),
)
prev_dim = int((F * 8) * 2 * 2)
self._mu = nn.Linear(prev_dim, L)
self._logvar = nn.Linear(prev_dim, L)
self._decoder = nn.Sequential(
# input is Z, going into a convolution
# NOTE: Using kernel_size = 2 here to get 32x32 output
nn.Unflatten(1, (L, 1, 1)),
nn.ConvTranspose2d(L, F * 8, 2, 1, 0, bias=False),
nn.BatchNorm2d(F * 8),
nn.ReLU(True),
# state size. (F*8) x 2 x 2
nn.ConvTranspose2d(F * 8, F * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 4),
nn.ReLU(True),
# state size. (F*4) x 4 x 4
nn.ConvTranspose2d(F * 4, F * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 2),
nn.ReLU(True),
# state size. (F*2) x 8 x 8
nn.ConvTranspose2d(F * 2, F, 4, 2, 1, bias=False),
nn.BatchNorm2d(F),
nn.ReLU(True),
# state size. (F) x 16 x 16
nn.ConvTranspose2d(F, 10 * K, 4, 2, 1, bias=False),
nn.Sigmoid(),
# state size. (10K) x 32 x 32
)
class DiagonalGaussianDCVAE(DiagonalGaussianMixin, BaseVAE):
def __init__(
self,
img_dim: int = 32,
latent_dim: int = 2,
in_channels: int = 3,
num_feature: int = 64,
):
super().__init__()
self._img_dim = img_dim
self._latent_dim = latent_dim
self._in_channels = in_channels
self._num_feature = num_feature
self.construct_model()
@property
def description(self):
d = f"{self.__class__.__name__}_NF{self._num_feature}-L{self._latent_dim}"
return d
def construct_model(self):
C = self._in_channels
F = self._num_feature
L = self._latent_dim
self._encoder = nn.Sequential(
nn.Conv2d(C, F, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F) x 16 x 16
nn.Conv2d(F, F * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F*2) x 8 x 8
nn.Conv2d(F * 2, F * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F*4) x 4 x 4
nn.Conv2d(F * 4, F * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size (F*8) x 2 x 2
# Flatten layer
nn.Flatten(),
)
prev_dim = int((F * 8) * 2 * 2)
self._mu = nn.Linear(prev_dim, L)
self._logvar = nn.Linear(prev_dim, L)
self._decoder = nn.Sequential(
# input is Z, going into a convolution
# NOTE: Using kernel_size = 2 here to get 32x32 output
nn.Unflatten(1, (L, 1, 1)),
nn.ConvTranspose2d(L, F * 8, 2, 1, 0, bias=False),
nn.BatchNorm2d(F * 8),
nn.ReLU(True),
# state size. (F*8) x 2 x 2
nn.ConvTranspose2d(F * 8, F * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 4),
nn.ReLU(True),
# state size. (F*4) x 4 x 4
nn.ConvTranspose2d(F * 4, F * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 2),
nn.ReLU(True),
# state size. (F*2) x 8 x 8
nn.ConvTranspose2d(F * 2, F, 4, 2, 1, bias=False),
nn.BatchNorm2d(F),
nn.ReLU(True),
# state size. (F) x 16 x 16
nn.ConvTranspose2d(F, 2 * C, 4, 2, 1, bias=False),
# state size. (2C) x 32 x 32
)
class ConstantGaussianDCVAE(ConstantGaussianMixin, BaseVAE):
def __init__(
self,
img_dim: int = 32,
latent_dim: int = 2,
in_channels: int = 3,
num_feature: int = 64,
):
super().__init__()
self._img_dim = img_dim
self._latent_dim = latent_dim
self._in_channels = in_channels
self._num_feature = num_feature
self.construct_model()
@property
def description(self):
d = f"{self.__class__.__name__}_NF{self._num_feature}-L{self._latent_dim}"
return d
def construct_model(self):
C = self._in_channels
F = self._num_feature
L = self._latent_dim
self.loggamma = nn.Parameter(torch.tensor(-2.0))
self._encoder = nn.Sequential(
nn.Conv2d(C, F, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F) x 16 x 16
nn.Conv2d(F, F * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F*2) x 8 x 8
nn.Conv2d(F * 2, F * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (F*4) x 4 x 4
nn.Conv2d(F * 4, F * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size (F*8) x 2 x 2
# Flatten layer
nn.Flatten(),
)
prev_dim = int((F * 8) * 2 * 2)
self._mu = nn.Linear(prev_dim, L)
self._logvar = nn.Linear(prev_dim, L)
self._decoder = nn.Sequential(
# input is Z, going into a convolution
# NOTE: Using kernel_size = 2 here to get 32x32 output
nn.Unflatten(1, (L, 1, 1)),
nn.ConvTranspose2d(L, F * 8, 2, 1, 0, bias=False),
nn.BatchNorm2d(F * 8),
nn.ReLU(True),
# state size. (F*8) x 2 x 2
nn.ConvTranspose2d(F * 8, F * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 4),
nn.ReLU(True),
# state size. (F*4) x 4 x 4
nn.ConvTranspose2d(F * 4, F * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(F * 2),
nn.ReLU(True),
# state size. (F*2) x 8 x 8
nn.ConvTranspose2d(F * 2, F, 4, 2, 1, bias=False),
nn.BatchNorm2d(F),
nn.ReLU(True),
# state size. (F) x 16 x 16
nn.ConvTranspose2d(F, C, 4, 2, 1, bias=False),
            # state size. (C) x 32 x 32
)
def load_model(
model_type: str,
img_dim: int,
latent_dim: int = 32,
in_channels: int = 1,
num_feature: int = 32,
) -> BaseModel:
model_cls = MODELS.get(model_type, None)
if model_cls is None:
raise ValueError(f"Unknown model type {model_type}")
model = model_cls(
img_dim,
latent_dim=latent_dim,
in_channels=in_channels,
num_feature=num_feature,
)
return model
MODELS = {
"BernoulliMLPVAE": BernoulliMLPVAE,
"BernoulliDCVAE": BernoulliDCVAE,
"MixLogisticsDCVAE": MixLogisticsDCVAE,
"DiagonalGaussianDCVAE": DiagonalGaussianDCVAE,
"ConstantGaussianDCVAE": ConstantGaussianDCVAE,
}
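# Usage sketch (not part of the original module): only load_model and
# BaseModel.step defined above are relied on; the random batch is a stand-in
# for real data and the hyperparameters are illustrative.
if __name__ == "__main__":
    _model = load_model(
        "BernoulliDCVAE", img_dim=32, latent_dim=16, in_channels=1, num_feature=64
    )
    _batch = torch.rand(8, 1, 32, 32)  # pixel values in [0, 1]
    _loss = _model.step(_batch)
    _loss.backward()
    print(float(_loss))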
| 34.069191
| 82
| 0.539411
|
20876b64aeafb5517da55b5f44543ea2b2e23a46
| 389
|
py
|
Python
|
app/litrev/asgi.py
|
ipa/litrev
|
01550417872b0e2b0c0541a3b7f6b28d18d874c9
|
[
"MIT"
] | 2
|
2020-04-09T11:46:36.000Z
|
2022-02-01T00:56:11.000Z
|
app/litrev/asgi.py
|
ipa/litrev
|
01550417872b0e2b0c0541a3b7f6b28d18d874c9
|
[
"MIT"
] | 5
|
2021-03-30T13:01:04.000Z
|
2021-09-22T18:50:40.000Z
|
app/litrev/asgi.py
|
ipa/litrev
|
01550417872b0e2b0c0541a3b7f6b28d18d874c9
|
[
"MIT"
] | null | null | null |
"""
ASGI config for litrev project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'litrev.settings')
application = get_asgi_application()
| 22.882353
| 78
| 0.784062
|
9a77b8d38aaf5e96299bb77c60864ff29314e9d2
| 27,273
|
py
|
Python
|
python2.7/site-packages/twisted/conch/scripts/cftp.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
python2.7/site-packages/twisted/conch/scripts/cftp.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 1
|
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
python2.7/site-packages/twisted/conch/scripts/cftp.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
# -*- test-case-name: twisted.conch.test.test_cftp -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# $Id: cftp.py,v 1.65 2004/03/11 00:29:14 z3p Exp $
#""" Implementation module for the `cftp` command.
#"""
from twisted.conch.client import agent, connect, default, options
from twisted.conch.error import ConchError
from twisted.conch.ssh import connection, common
from twisted.conch.ssh import channel, filetransfer
from twisted.protocols import basic
from twisted.internet import reactor, stdio, defer, utils
from twisted.python import log, usage, failure
import os, sys, getpass, struct, tty, fcntl, base64, signal, stat, errno
import fnmatch, pwd, time, glob
class ClientOptions(options.ConchOptions):
synopsis = """Usage: cftp [options] [user@]host
cftp [options] [user@]host[:dir[/]]
cftp [options] [user@]host[:file [localfile]]
"""
optParameters = [
['buffersize', 'B', 32768, 'Size of the buffer to use for sending/receiving.'],
['batchfile', 'b', None, 'File to read commands from, or \'-\' for stdin.'],
['requests', 'R', 5, 'Number of requests to make before waiting for a reply.'],
['subsystem', 's', 'sftp', 'Subsystem/server program to connect to.']]
zsh_altArgDescr = {"buffersize":"Size of send/receive buffer (default: 32768)"}
#zsh_multiUse = ["foo", "bar"]
#zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")]
#zsh_actions = {"foo":'_files -g "*.foo"', "bar":"(one two three)"}
#zsh_actionDescr = {"logfile":"log file name", "random":"random seed"}
zsh_extras = ['2::localfile:{if [[ $words[1] == *:* ]]; then; _files; fi}']
def parseArgs(self, host, localPath=None):
self['remotePath'] = ''
if ':' in host:
host, self['remotePath'] = host.split(':', 1)
            self['remotePath'] = self['remotePath'].rstrip('/')
self['host'] = host
self['localPath'] = localPath
def run():
# import hotshot
# prof = hotshot.Profile('cftp.prof')
# prof.start()
args = sys.argv[1:]
if '-l' in args: # cvs is an idiot
i = args.index('-l')
args = args[i:i+2]+args
del args[i+2:i+4]
options = ClientOptions()
try:
options.parseOptions(args)
except usage.UsageError, u:
print 'ERROR: %s' % u
sys.exit(1)
if options['log']:
realout = sys.stdout
log.startLogging(sys.stderr)
sys.stdout = realout
else:
log.discardLogs()
doConnect(options)
reactor.run()
# prof.stop()
# prof.close()
def handleError():
from twisted.python import failure
global exitStatus
exitStatus = 2
try:
reactor.stop()
except: pass
log.err(failure.Failure())
raise
def doConnect(options):
# log.deferr = handleError # HACK
if '@' in options['host']:
options['user'], options['host'] = options['host'].split('@',1)
host = options['host']
if not options['user']:
options['user'] = getpass.getuser()
if not options['port']:
options['port'] = 22
else:
options['port'] = int(options['port'])
host = options['host']
port = options['port']
conn = SSHConnection()
conn.options = options
vhk = default.verifyHostKey
uao = default.SSHUserAuthClient(options['user'], options, conn)
connect.connect(host, port, options, vhk, uao).addErrback(_ebExit)
def _ebExit(f):
#global exitStatus
if hasattr(f.value, 'value'):
s = f.value.value
else:
s = str(f)
print s
#exitStatus = "conch: exiting with error %s" % f
try:
reactor.stop()
except: pass
def _ignore(*args): pass
class FileWrapper:
def __init__(self, f):
self.f = f
self.total = 0.0
f.seek(0, 2) # seek to the end
self.size = f.tell()
def __getattr__(self, attr):
return getattr(self.f, attr)
class StdioClient(basic.LineReceiver):
ps = 'cftp> '
delimiter = '\n'
def __init__(self, client, f = None):
self.client = client
self.currentDirectory = ''
self.file = f
self.useProgressBar = (not f and 1) or 0
def connectionMade(self):
self.client.realPath('').addCallback(self._cbSetCurDir)
def _cbSetCurDir(self, path):
self.currentDirectory = path
self._newLine()
def lineReceived(self, line):
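        # Dispatch: in batch mode a leading '-' means "ignore errors for this
        # line", a leading '!' runs a local shell command via cmd_EXEC, and
        # anything else is routed to the matching cmd_<NAME> method.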
if self.client.transport.localClosed:
return
log.msg('got line %s' % repr(line))
line = line.lstrip()
if not line:
self._newLine()
return
if self.file and line.startswith('-'):
self.ignoreErrors = 1
line = line[1:]
else:
self.ignoreErrors = 0
if ' ' in line:
command, rest = line.split(' ', 1)
rest = rest.lstrip()
else:
command, rest = line, ''
if command.startswith('!'): # command
f = self.cmd_EXEC
rest = (command[1:] + ' ' + rest).strip()
else:
command = command.upper()
log.msg('looking up cmd %s' % command)
f = getattr(self, 'cmd_%s' % command, None)
if f is not None:
d = defer.maybeDeferred(f, rest)
d.addCallback(self._cbCommand)
d.addErrback(self._ebCommand)
else:
self._ebCommand(failure.Failure(NotImplementedError(
"No command called `%s'" % command)))
self._newLine()
def _printFailure(self, f):
log.msg(f)
e = f.trap(NotImplementedError, filetransfer.SFTPError, OSError, IOError)
if e == NotImplementedError:
self.transport.write(self.cmd_HELP(''))
elif e == filetransfer.SFTPError:
self.transport.write("remote error %i: %s\n" %
(f.value.code, f.value.message))
elif e in (OSError, IOError):
self.transport.write("local error %i: %s\n" %
(f.value.errno, f.value.strerror))
def _newLine(self):
if self.client.transport.localClosed:
return
self.transport.write(self.ps)
self.ignoreErrors = 0
if self.file:
l = self.file.readline()
if not l:
self.client.transport.loseConnection()
else:
self.transport.write(l)
self.lineReceived(l.strip())
def _cbCommand(self, result):
if result is not None:
self.transport.write(result)
if not result.endswith('\n'):
self.transport.write('\n')
self._newLine()
def _ebCommand(self, f):
self._printFailure(f)
if self.file and not self.ignoreErrors:
self.client.transport.loseConnection()
self._newLine()
def cmd_CD(self, path):
path, rest = self._getFilename(path)
if not path.endswith('/'):
path += '/'
newPath = path and os.path.join(self.currentDirectory, path) or ''
d = self.client.openDirectory(newPath)
d.addCallback(self._cbCd)
d.addErrback(self._ebCommand)
return d
def _cbCd(self, directory):
directory.close()
d = self.client.realPath(directory.name)
d.addCallback(self._cbCurDir)
return d
def _cbCurDir(self, path):
self.currentDirectory = path
def cmd_CHGRP(self, rest):
grp, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
grp = int(grp)
d = self.client.getAttrs(path)
d.addCallback(self._cbSetUsrGrp, path, grp=grp)
return d
def cmd_CHMOD(self, rest):
mod, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
mod = int(mod, 8)
d = self.client.setAttrs(path, {'permissions':mod})
d.addCallback(_ignore)
return d
def cmd_CHOWN(self, rest):
usr, rest = rest.split(None, 1)
path, rest = self._getFilename(rest)
usr = int(usr)
d = self.client.getAttrs(path)
d.addCallback(self._cbSetUsrGrp, path, usr=usr)
return d
def _cbSetUsrGrp(self, attrs, path, usr=None, grp=None):
new = {}
new['uid'] = (usr is not None) and usr or attrs['uid']
new['gid'] = (grp is not None) and grp or attrs['gid']
d = self.client.setAttrs(path, new)
d.addCallback(_ignore)
return d
def cmd_GET(self, rest):
remote, rest = self._getFilename(rest)
if '*' in remote or '?' in remote: # wildcard
if rest:
local, rest = self._getFilename(rest)
if not os.path.isdir(local):
return "Wildcard get with non-directory target."
else:
local = ''
d = self._remoteGlob(remote)
d.addCallback(self._cbGetMultiple, local)
return d
if rest:
local, rest = self._getFilename(rest)
else:
local = os.path.split(remote)[1]
log.msg((remote, local))
lf = file(local, 'w', 0)
path = os.path.join(self.currentDirectory, remote)
d = self.client.openFile(path, filetransfer.FXF_READ, {})
d.addCallback(self._cbGetOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
return d
def _cbGetMultiple(self, files, local):
#if self._useProgressBar: # one at a time
# XXX this can be optimized for times w/o progress bar
return self._cbGetMultipleNext(None, files, local)
def _cbGetMultipleNext(self, res, files, local):
if isinstance(res, failure.Failure):
self._printFailure(res)
elif res:
self.transport.write(res)
if not res.endswith('\n'):
self.transport.write('\n')
if not files:
return
f = files.pop(0)[0]
lf = file(os.path.join(local, os.path.split(f)[1]), 'w', 0)
path = os.path.join(self.currentDirectory, f)
d = self.client.openFile(path, filetransfer.FXF_READ, {})
d.addCallback(self._cbGetOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
d.addBoth(self._cbGetMultipleNext, files, local)
return d
def _ebCloseLf(self, f, lf):
lf.close()
return f
def _cbGetOpenFile(self, rf, lf):
return rf.getAttrs().addCallback(self._cbGetFileSize, rf, lf)
def _cbGetFileSize(self, attrs, rf, lf):
if not stat.S_ISREG(attrs['permissions']):
rf.close()
lf.close()
return "Can't get non-regular file: %s" % rf.name
rf.size = attrs['size']
bufferSize = self.client.transport.conn.options['buffersize']
numRequests = self.client.transport.conn.options['requests']
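        # Pipeline the download: keep `numRequests` chunk reads in flight; each
        # completed read writes its data at the right offset and immediately
        # schedules the next chunk via _getNextChunk.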
rf.total = 0.0
dList = []
chunks = []
startTime = time.time()
for i in range(numRequests):
d = self._cbGetRead('', rf, lf, chunks, 0, bufferSize, startTime)
dList.append(d)
dl = defer.DeferredList(dList, fireOnOneErrback=1)
dl.addCallback(self._cbGetDone, rf, lf)
return dl
def _getNextChunk(self, chunks):
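        # `chunks` is an ordered list of (start, end) ranges that have already
        # been requested ('eof' as an end value marks the end of the file).
        # Return the offset and length of the first gap, or extend past the
        # last chunk if there are no gaps left.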
end = 0
for chunk in chunks:
if end == 'eof':
return # nothing more to get
if end != chunk[0]:
i = chunks.index(chunk)
chunks.insert(i, (end, chunk[0]))
return (end, chunk[0] - end)
end = chunk[1]
bufSize = int(self.client.transport.conn.options['buffersize'])
chunks.append((end, end + bufSize))
return (end, bufSize)
def _cbGetRead(self, data, rf, lf, chunks, start, size, startTime):
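        # `data` is either the bytes just read, a Failure wrapping EOFError
        # (read past the end of the file), or '' on the priming call; the
        # bookkeeping in `chunks` is brought up to date and the next read,
        # if any, is scheduled.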
if data and isinstance(data, failure.Failure):
log.msg('get read err: %s' % data)
reason = data
reason.trap(EOFError)
i = chunks.index((start, start + size))
del chunks[i]
chunks.insert(i, (start, 'eof'))
elif data:
log.msg('get read data: %i' % len(data))
lf.seek(start)
lf.write(data)
if len(data) != size:
log.msg('got less than we asked for: %i < %i' %
(len(data), size))
i = chunks.index((start, start + size))
del chunks[i]
chunks.insert(i, (start, start + len(data)))
rf.total += len(data)
if self.useProgressBar:
self._printProgessBar(rf, startTime)
chunk = self._getNextChunk(chunks)
if not chunk:
return
else:
start, length = chunk
log.msg('asking for %i -> %i' % (start, start+length))
d = rf.readChunk(start, length)
d.addBoth(self._cbGetRead, rf, lf, chunks, start, length, startTime)
return d
def _cbGetDone(self, ignored, rf, lf):
log.msg('get done')
rf.close()
lf.close()
if self.useProgressBar:
self.transport.write('\n')
return "Transferred %s to %s" % (rf.name, lf.name)
def cmd_PUT(self, rest):
local, rest = self._getFilename(rest)
if '*' in local or '?' in local: # wildcard
if rest:
remote, rest = self._getFilename(rest)
path = os.path.join(self.currentDirectory, remote)
d = self.client.getAttrs(path)
d.addCallback(self._cbPutTargetAttrs, remote, local)
return d
else:
remote = ''
files = glob.glob(local)
return self._cbPutMultipleNext(None, files, remote)
if rest:
remote, rest = self._getFilename(rest)
else:
remote = os.path.split(local)[1]
lf = file(local, 'r')
path = os.path.join(self.currentDirectory, remote)
d = self.client.openFile(path, filetransfer.FXF_WRITE|filetransfer.FXF_CREAT, {})
d.addCallback(self._cbPutOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
return d
def _cbPutTargetAttrs(self, attrs, path, local):
if not stat.S_ISDIR(attrs['permissions']):
return "Wildcard put with non-directory target."
        files = glob.glob(local)
        return self._cbPutMultipleNext(None, files, path)
def _cbPutMultipleNext(self, res, files, path):
if isinstance(res, failure.Failure):
self._printFailure(res)
elif res:
self.transport.write(res)
if not res.endswith('\n'):
self.transport.write('\n')
f = None
while files and not f:
try:
f = files.pop(0)
lf = file(f, 'r')
except:
self._printFailure(failure.Failure())
f = None
if not f:
return
name = os.path.split(f)[1]
remote = os.path.join(self.currentDirectory, path, name)
log.msg((name, remote, path))
d = self.client.openFile(remote, filetransfer.FXF_WRITE|filetransfer.FXF_CREAT, {})
d.addCallback(self._cbPutOpenFile, lf)
d.addErrback(self._ebCloseLf, lf)
d.addBoth(self._cbPutMultipleNext, files, path)
return d
def _cbPutOpenFile(self, rf, lf):
numRequests = self.client.transport.conn.options['requests']
if self.useProgressBar:
lf = FileWrapper(lf)
dList = []
chunks = []
startTime = time.time()
for i in range(numRequests):
d = self._cbPutWrite(None, rf, lf, chunks, startTime)
if d:
dList.append(d)
dl = defer.DeferredList(dList, fireOnOneErrback=1)
dl.addCallback(self._cbPutDone, rf, lf)
return dl
def _cbPutWrite(self, ignored, rf, lf, chunks, startTime):
chunk = self._getNextChunk(chunks)
start, size = chunk
lf.seek(start)
data = lf.read(size)
if self.useProgressBar:
lf.total += len(data)
self._printProgessBar(lf, startTime)
if data:
d = rf.writeChunk(start, data)
d.addCallback(self._cbPutWrite, rf, lf, chunks, startTime)
return d
else:
return
def _cbPutDone(self, ignored, rf, lf):
lf.close()
rf.close()
if self.useProgressBar:
self.transport.write('\n')
return 'Transferred %s to %s' % (lf.name, rf.name)
def cmd_LCD(self, path):
os.chdir(path)
def cmd_LN(self, rest):
linkpath, rest = self._getFilename(rest)
targetpath, rest = self._getFilename(rest)
linkpath, targetpath = map(
lambda x: os.path.join(self.currentDirectory, x),
(linkpath, targetpath))
return self.client.makeLink(linkpath, targetpath).addCallback(_ignore)
def cmd_LS(self, rest):
# possible lines:
# ls current directory
# ls name_of_file that file
# ls name_of_directory that directory
# ls some_glob_string current directory, globbed for that string
options = []
rest = rest.split()
while rest and rest[0] and rest[0][0] == '-':
opts = rest.pop(0)[1:]
for o in opts:
if o == 'l':
options.append('verbose')
elif o == 'a':
options.append('all')
rest = ' '.join(rest)
path, rest = self._getFilename(rest)
if not path:
fullPath = self.currentDirectory + '/'
else:
fullPath = os.path.join(self.currentDirectory, path)
d = self._remoteGlob(fullPath)
d.addCallback(self._cbDisplayFiles, options)
return d
def _cbDisplayFiles(self, files, options):
files.sort()
if 'all' not in options:
files = [f for f in files if not f[0].startswith('.')]
if 'verbose' in options:
lines = [f[1] for f in files]
else:
lines = [f[0] for f in files]
if not lines:
return None
else:
return '\n'.join(lines)
def cmd_MKDIR(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.makeDirectory(path, {}).addCallback(_ignore)
def cmd_RMDIR(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.removeDirectory(path).addCallback(_ignore)
def cmd_LMKDIR(self, path):
os.system("mkdir %s" % path)
def cmd_RM(self, path):
path, rest = self._getFilename(path)
path = os.path.join(self.currentDirectory, path)
return self.client.removeFile(path).addCallback(_ignore)
def cmd_LLS(self, rest):
os.system("ls %s" % rest)
def cmd_RENAME(self, rest):
oldpath, rest = self._getFilename(rest)
newpath, rest = self._getFilename(rest)
oldpath, newpath = map (
lambda x: os.path.join(self.currentDirectory, x),
(oldpath, newpath))
return self.client.renameFile(oldpath, newpath).addCallback(_ignore)
def cmd_EXIT(self, ignored):
self.client.transport.loseConnection()
cmd_QUIT = cmd_EXIT
def cmd_VERSION(self, ignored):
return "SFTP version %i" % self.client.version
def cmd_HELP(self, ignored):
return """Available commands:
cd path Change remote directory to 'path'.
chgrp gid path Change gid of 'path' to 'gid'.
chmod mode path Change mode of 'path' to 'mode'.
chown uid path Change uid of 'path' to 'uid'.
exit Disconnect from the server.
get remote-path [local-path] Get remote file.
help Get a list of available commands.
lcd path Change local directory to 'path'.
lls [ls-options] [path] Display local directory listing.
lmkdir path Create local directory.
ln linkpath targetpath Symlink remote file.
lpwd Print the local working directory.
ls [-l] [path] Display remote directory listing.
mkdir path Create remote directory.
progress Toggle progress bar.
put local-path [remote-path] Put local file.
pwd Print the remote working directory.
quit Disconnect from the server.
rename oldpath newpath Rename remote file.
rmdir path Remove remote directory.
rm path Remove remote file.
version Print the SFTP version.
? Synonym for 'help'.
"""
def cmd_PWD(self, ignored):
return self.currentDirectory
def cmd_LPWD(self, ignored):
return os.getcwd()
def cmd_PROGRESS(self, ignored):
self.useProgressBar = not self.useProgressBar
return "%ssing progess bar." % (self.useProgressBar and "U" or "Not u")
def cmd_EXEC(self, rest):
shell = pwd.getpwnam(getpass.getuser())[6]
print repr(rest)
if rest:
cmds = ['-c', rest]
return utils.getProcessOutput(shell, cmds, errortoo=1)
else:
os.system(shell)
# accessory functions
def _remoteGlob(self, fullPath):
log.msg('looking up %s' % fullPath)
head, tail = os.path.split(fullPath)
if '*' in tail or '?' in tail:
glob = 1
else:
glob = 0
if tail and not glob: # could be file or directory
# try directory first
d = self.client.openDirectory(fullPath)
d.addCallback(self._cbOpenList, '')
d.addErrback(self._ebNotADirectory, head, tail)
else:
d = self.client.openDirectory(head)
d.addCallback(self._cbOpenList, tail)
return d
def _cbOpenList(self, directory, glob):
files = []
d = directory.read()
d.addBoth(self._cbReadFile, files, directory, glob)
return d
def _ebNotADirectory(self, reason, path, glob):
d = self.client.openDirectory(path)
d.addCallback(self._cbOpenList, glob)
return d
def _cbReadFile(self, files, l, directory, glob):
if not isinstance(files, failure.Failure):
if glob:
l.extend([f for f in files if fnmatch.fnmatch(f[0], glob)])
else:
l.extend(files)
d = directory.read()
d.addBoth(self._cbReadFile, l, directory, glob)
return d
else:
reason = files
reason.trap(EOFError)
directory.close()
return l
def _abbrevSize(self, size):
# from http://mail.python.org/pipermail/python-list/1999-December/018395.html
_abbrevs = [
(1<<50L, 'PB'),
(1<<40L, 'TB'),
(1<<30L, 'GB'),
(1<<20L, 'MB'),
(1<<10L, 'kb'),
(1, '')
]
for factor, suffix in _abbrevs:
if size > factor:
break
return '%.1f' % (size/factor) + suffix
def _abbrevTime(self, t):
if t > 3600: # 1 hour
hours = int(t / 3600)
t -= (3600 * hours)
mins = int(t / 60)
t -= (60 * mins)
return "%i:%02i:%02i" % (hours, mins, t)
else:
mins = int(t/60)
t -= (60 * mins)
return "%02i:%02i" % (mins, t)
def _printProgessBar(self, f, startTime):
diff = time.time() - startTime
total = f.total
try:
winSize = struct.unpack('4H',
fcntl.ioctl(0, tty.TIOCGWINSZ, '12345679'))
except IOError:
winSize = [None, 80]
speed = total/diff
if speed:
timeLeft = (f.size - total) / speed
else:
timeLeft = 0
front = f.name
back = '%3i%% %s %sps %s ' % ((total/f.size)*100, self._abbrevSize(total),
self._abbrevSize(total/diff), self._abbrevTime(timeLeft))
spaces = (winSize[1] - (len(front) + len(back) + 1)) * ' '
self.transport.write('\r%s%s%s' % (front, spaces, back))
def _getFilename(self, line):
        line = line.lstrip()
if not line:
return None, ''
if line[0] in '\'"':
ret = []
line = list(line)
try:
for i in range(1,len(line)):
c = line[i]
if c == line[0]:
return ''.join(ret), ''.join(line[i+1:]).lstrip()
elif c == '\\': # quoted character
del line[i]
if line[i] not in '\'"\\':
raise IndexError, "bad quote: \\%s" % line[i]
ret.append(line[i])
else:
ret.append(line[i])
except IndexError:
raise IndexError, "unterminated quote"
ret = line.split(None, 1)
if len(ret) == 1:
return ret[0], ''
else:
return ret
StdioClient.__dict__['cmd_?'] = StdioClient.cmd_HELP
class SSHConnection(connection.SSHConnection):
def serviceStarted(self):
self.openChannel(SSHSession())
class SSHSession(channel.SSHChannel):
name = 'session'
def channelOpen(self, foo):
log.msg('session %s open' % self.id)
if self.conn.options['subsystem'].startswith('/'):
request = 'exec'
else:
request = 'subsystem'
d = self.conn.sendRequest(self, request, \
common.NS(self.conn.options['subsystem']), wantReply=1)
d.addCallback(self._cbSubsystem)
d.addErrback(_ebExit)
def _cbSubsystem(self, result):
self.client = filetransfer.FileTransferClient()
self.client.makeConnection(self)
self.dataReceived = self.client.dataReceived
f = None
if self.conn.options['batchfile']:
fn = self.conn.options['batchfile']
if fn != '-':
f = file(fn)
self.stdio = stdio.StandardIO(StdioClient(self.client, f))
def extReceived(self, t, data):
if t==connection.EXTENDED_DATA_STDERR:
log.msg('got %s stderr data' % len(data))
sys.stderr.write(data)
sys.stderr.flush()
def eofReceived(self):
log.msg('got eof')
self.stdio.closeStdin()
def closeReceived(self):
log.msg('remote side closed %s' % self)
self.conn.sendClose(self)
def closed(self):
try:
reactor.stop()
except:
pass
def stopWriting(self):
self.stdio.pauseProducing()
def startWriting(self):
self.stdio.resumeProducing()
if __name__ == '__main__':
run()
| 34.09125
| 99
| 0.549078
|
bb6595f7d0a609f9cafb99fe912de0d1ab2616a4
| 537
|
py
|
Python
|
kendama/migrations/0003_auto_20200701_2227.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | null | null | null |
kendama/migrations/0003_auto_20200701_2227.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | 13
|
2021-02-13T20:15:18.000Z
|
2022-03-11T23:57:07.000Z
|
kendama/migrations/0003_auto_20200701_2227.py
|
amin-da71/Benbb96
|
0c9e37425d0665e403ba6fecf0c4b17669c29ada
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.13 on 2020-07-01 20:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('kendama', '0002_historicalcomboplayer_historicaltrickplayer'),
]
operations = [
migrations.AlterField(
model_name='trickplayer',
name='trick',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='trick_players', to='kendama.KendamaTrick'),
),
]
| 26.85
| 138
| 0.674115
|
da50330dac86e5abd82a5469c858058afb111e90
| 11,118
|
py
|
Python
|
ms3/app.py
|
skoobe/ms3
|
e88dedbbe6d36dee58e36503c06cc17746c50db1
|
[
"MIT"
] | 7
|
2015-09-15T23:03:12.000Z
|
2021-11-08T11:21:21.000Z
|
ms3/app.py
|
skoobe/ms3
|
e88dedbbe6d36dee58e36503c06cc17746c50db1
|
[
"MIT"
] | null | null | null |
ms3/app.py
|
skoobe/ms3
|
e88dedbbe6d36dee58e36503c06cc17746c50db1
|
[
"MIT"
] | 6
|
2015-09-15T23:03:14.000Z
|
2021-02-28T15:49:05.000Z
|
"""
The Tornado application
"""
import os
import errno
import shutil
import socket
import hashlib
import logging
import urlparse
import tornado.web
import tornado.ioloop
import tornado.httpserver
from tornado.options import options, define
import ms3.general_options as general_options
from ms3.commands import (
Bucket, ListAllMyBucketsResponse, xml_string,
ListBucketResponse, ListBucketVersionsResponse,
VersioningConfigurationResponse, CopyObjectResponse)
define("port", default=9009, type=int, metavar="PORT",
help="Port on which we run this server (usually https port)")
define("internal_ssl", default=False, type=bool, metavar="True|False",
help="Use internal SSL")
define("keyfile", default="certs/key.pem", type=str,
help="Key File", metavar="PATH")
define("certfile", default="certs/cert.pem", type=str,
help="Certificate File", metavar="PATH")
define("cafile", default="certs/ca.pem", type=str,
help="CA Certificate File", metavar="PATH")
_logger = logging.getLogger(__name__)
class BaseHandler(tornado.web.RequestHandler):
""" Common functionality for all handlers """
@property
def datadir(self):
return self.application.datadir
def echo(self):
""" Debug function for a request """
self.set_header('Content-Type', 'text/plain')
request = self.request
_logger.debug("Request headers")
for key, value in request.headers.iteritems():
_logger.debug("\t%s: %s", key, value)
_logger.debug("Request arguments")
for key, value in self.request.arguments.iteritems():
_logger.debug("\t%s: %s" % (key, value))
props = ["method", "uri", "body"]
for key in props:
_logger.debug("%s: %s", key.title(), getattr(request, key))
def has_section(self, section):
"""
Check if the request has as query argument the specified section
"""
args = self.request.uri.split("?")
if args and len(args) > 1 and section in args[1]:
return True
return False
def has_header(self, header):
""" Check if the request has a specified header """
return header in self.request.headers
def get_header(self, header):
""" Get the value of the specified header """
return self.request.headers[header]
def get_bucket(self, name):
"""
Helper for getting a bucket.
Sends 404 back if the bucket is not found
"""
try:
return Bucket(name, self.datadir)
except OSError as exception:
_logger.warn(exception)
self.send_error(404)
def render_xml(self, result):
""" Helper for rendering the response """
self.write(xml_string(result.xml()))
self.finish()
class CatchAllHandler(BaseHandler):
""" Debug handler for inspecting requests """
def get(self):
self.echo()
def post(self):
self.echo()
class BucketHandler(BaseHandler):
""" Handle for GET/PUT/DELETE operations on buckets """
def get(self, name):
bucket = self.get_bucket(name)
if not bucket:
return
result = None
prefix = self.get_argument("prefix", None)
if self.has_section("versioning"):
result = VersioningConfigurationResponse(bucket)
elif self.has_section("versions"):
result = ListBucketVersionsResponse(
bucket, bucket.list_versions(prefix=prefix))
else:
result = ListBucketResponse(
bucket, bucket.list(prefix=prefix))
self.render_xml(result)
def head(self, name):
self.set_status(200)
def put(self, name):
if self.has_section("versioning"):
bucket = self.get_bucket(name)
if not bucket:
return
if '<Status>Enabled</Status>' in self.request.body:
bucket.enable_versioning()
else:
bucket.disable_versioning()
else:
bucket = Bucket.create(name, self.datadir)
if not bucket:
_logger.warn("Could not create bucket %s", name)
self.send_error(409)
return
self.echo()
def delete(self, name):
bucket = self.get_bucket(name)
if not bucket:
return
bucket.delete()
self.set_status(204)
class ListAllMyBucketsHandler(BaseHandler):
""" Handler for listing all buckets """
def get(self):
result = ListAllMyBucketsResponse(Bucket.get_all_buckets(self.datadir))
self.render_xml(result)
def delete(self):
shutil.rmtree(options.datadir, ignore_errors=True)
try:
os.makedirs(options.datadir)
except (IOError, OSError):
pass
class ObjectHandler(BaseHandler):
""" Handle for GET/PUT/HEAD/DELETE on objects """
def get(self, name, key):
version_id = self.get_argument("versionId", None)
bucket = self.get_bucket(name)
if not bucket:
return
entry = bucket.get_entry(key, version_id=version_id)
if not entry:
self.send_error(404)
else:
entry.set_headers(self)
self.write(entry.read())
def put(self, name, key):
bucket = self.get_bucket(name)
if not bucket:
return
if not self.request.body:
if self.has_header("x-amz-copy-source"):
source_name, key_name = (self.get_header("x-amz-copy-source").
split("/", 1))
source = self.get_bucket(source_name)
if not source:
return
version_id = None
if "?" in key_name:
key_name, args = key_name.split("?", 1)
args = urlparse.parse_qs(args)
if "versionId" in args:
version_id = args["versionId"][0]
entry = source.get_entry(
key_name, version_id=version_id)
if not entry or entry.size == 0:
_logger.warn("Could not find source entry or size is 0"
" for %s/%s", source_name, key_name)
self.send_error(404)
return
entry = bucket.copy_entry(key, entry)
if entry:
response = CopyObjectResponse(entry)
self.render_xml(response)
else:
_logger.warn("Not accepting 0 bytes files")
self.set_header('ETag', '"%s"' % hashlib.md5("").hexdigest())
else:
entry = bucket.set_entry(key, self.request.body)
self.set_header('ETag', '"%s"' % entry.etag)
def head(self, name, key):
version_id = self.get_argument("versionId", None)
bucket = self.get_bucket(name)
if not bucket:
return
entry = bucket.get_entry(key, version_id=version_id)
if not entry:
self.send_error(404)
else:
self.set_header('ETag', '"%s"' % entry.etag)
def delete(self, name, key):
version_id = self.get_argument("versionId", None)
bucket = self.get_bucket(name)
if not bucket:
return
bucket.delete_entry(key, version_id=version_id)
self.set_status(204)
def fix_TCPServer_handle_connection():
""" Monkey-patching tornado to increase the maxium file size to 1.5 GB """
import tornado.netutil
from tornado.iostream import SSLIOStream, IOStream
import ssl
max_buffer_size = 1536 * 1024 * 1024 # 1.5GB
read_chunk_size = 64 * 1024
def _handle_connection(self, connection, address):
if self.ssl_options is not None:
assert ssl, "Python 2.6+ and OpenSSL required for SSL"
try:
connection = ssl.wrap_socket(connection,
server_side=True,
do_handshake_on_connect=False,
**self.ssl_options)
except ssl.SSLError, err:
if err.args[0] == ssl.SSL_ERROR_EOF:
return connection.close()
else:
raise
except socket.error, err:
if err.args[0] == errno.ECONNABORTED:
return connection.close()
else:
raise
try:
if self.ssl_options is not None:
stream = SSLIOStream(connection, io_loop=self.io_loop,
max_buffer_size=max_buffer_size,
read_chunk_size=read_chunk_size)
else:
stream = IOStream(connection, io_loop=self.io_loop,
max_buffer_size=max_buffer_size,
read_chunk_size=read_chunk_size)
self.handle_stream(stream, address)
except Exception:
_logger.error("Error in connection callback", exc_info=True)
tornado.netutil.TCPServer._handle_connection = _handle_connection
class MS3App(tornado.web.Application):
""" """
def __init__(self, args=None, debug=False):
general_options.parse_options(args=args)
handlers = [
(r"/", ListAllMyBucketsHandler),
(r"/([^/]+)/", BucketHandler),
(r"/([^/]+)/(.+)", ObjectHandler),
(r"/.*", CatchAllHandler)
]
settings = {
'debug': debug or options.debug
}
self.datadir = options.datadir
if not os.path.isabs(self.datadir):
self.datadir = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..",
self.datadir))
if not os.path.exists(self.datadir):
try:
os.makedirs(self.datadir)
except (OSError, IOError) as exception:
_logger.warn("Tried to create %s: %s", self.datadir, exception)
tornado.web.Application.__init__(self, handlers, **settings)
fix_TCPServer_handle_connection()
def run(args=None):
""" Helper for running the app """
app = MS3App(args=args)
ssl_options = None
if options.internal_ssl:
ssl_options = {
'keyfile': options.keyfile,
'certfile': options.certfile,
'ca_certs': options.cafile
}
http_server = tornado.httpserver.HTTPServer(app, xheaders=True,
ssl_options=ssl_options)
http_server.listen(options.port)
_logger.info("Using configuration file %s", options.config)
_logger.info("Using data directory %s", app.datadir)
_logger.info("Starting up on port %s", options.port)
    instance = tornado.ioloop.IOLoop.instance()
instance.start()
if __name__ == "__main__":
run()
| 34
| 79
| 0.569707
|
0ce8758272f64585d360a877a54a320ef13ae78d
| 3,579
|
py
|
Python
|
unittests/tickers/test_log2.py
|
xxao/pero
|
a7f0c84fae0b21fe120204e798bd61cdab3a125d
|
[
"MIT"
] | 13
|
2019-07-15T17:51:21.000Z
|
2022-03-15T06:13:43.000Z
|
unittests/tickers/test_log2.py
|
xxao/pero
|
a7f0c84fae0b21fe120204e798bd61cdab3a125d
|
[
"MIT"
] | 1
|
2021-12-29T00:46:44.000Z
|
2022-01-21T16:18:48.000Z
|
unittests/tickers/test_log2.py
|
xxao/pero
|
a7f0c84fae0b21fe120204e798bd61cdab3a125d
|
[
"MIT"
] | 3
|
2020-09-27T14:31:45.000Z
|
2022-01-22T14:28:15.000Z
|
# Created byMartin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
import unittest
import pero
class TestCase(unittest.TestCase):
"""Test case for logarithmic ticker with base 2."""
def test_major_ticks(self):
"""Tests whether major ticks are generated works correctly."""
# init ticker
ticker = pero.LogTicker(base=2, major_count=7)
# test above one
ticker(start=1.1, end=0.9e3)
ticks = ticker.major_ticks()
self.assertEqual(ticks, (2, 4, 8, 16, 32, 64, 128, 256, 512))
# test one
ticker(start=1, end=0.9e3)
ticks = ticker.major_ticks()
self.assertEqual(ticks, (1, 2, 4, 8, 16, 32, 64, 128, 256, 512))
# test below one
ticker(start=0.1, end=0.9)
ticks = ticker.major_ticks()
self.assertEqual(ticks, (0.125, 0.25, 0.5))
# test cross one
ticker(start=0.1, end=9)
ticks = ticker.major_ticks()
self.assertEqual(ticks, (0.125, 0.25, 0.5, 1, 2, 4, 8))
# test condensed
ticker(start=1, end=1e7)
ticks = ticker.major_ticks()
self.assertEqual(ticks, (1, 16, 256, 4096, 65536, 1048576))
# test flipped
ticker(start=9, end=0.1)
ticks = ticker.major_ticks()
self.assertEqual(ticks, (8, 4, 2, 1, 0.5, 0.25, 0.125))
def test_minor_ticks(self):
"""Tests whether minor ticks are generated correctly."""
# init ticker
ticker = pero.LogTicker(base=2, major_count=7, minor_count=4)
# test above one
ticker(start=1.1, end=0.9e3)
ticks = ticker.minor_ticks()
self.assertEqual(ticks, (2, 4, 8, 16, 32, 64, 128, 256, 512))
# test one
ticker(start=1, end=0.9e3)
ticks = ticker.minor_ticks()
self.assertEqual(ticks, (1, 2, 4, 8, 16, 32, 64, 128, 256, 512))
# test below one
ticker(start=0.1, end=0.9)
ticks = ticker.minor_ticks()
self.assertEqual(ticks, (0.125, 0.25, 0.5))
# test cross one
ticker(start=0.1, end=9)
ticks = ticker.minor_ticks()
self.assertEqual(ticks, (0.125, 0.25, 0.5, 1, 2, 4, 8))
# test condensed
ticker(start=1, end=1e4)
ticks = ticker.minor_ticks()
self.assertEqual(ticks, (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192))
# test flipped
ticker(start=9, end=0.1)
ticks = ticker.minor_ticks()
self.assertEqual(ticks, (8, 4, 2, 1, 0.5, 0.25, 0.125))
def test_beautify(self):
"""Tests whether beautify function works correctly."""
# init ticker
ticker = pero.LogTicker(base=2, major_count=7)
# test above one
beautified = ticker.beautify(1.1, 0.9e3)
self.assertEqual(beautified, (1, 1024))
# test one
beautified = ticker.beautify(1, 0.9e3)
self.assertEqual(beautified, (1, 1024))
# test below one
beautified = ticker.beautify(0.1, 0.9)
self.assertEqual(beautified, (0.0625, 1))
# test cross one
beautified = ticker.beautify(0.9, 1.1e3)
self.assertEqual(beautified, (0.5, 2048))
# test flipped
beautified = ticker.beautify(0.9e3, 0.9)
self.assertEqual(beautified, (1024, 0.5))
# run test case
if __name__ == "__main__":
unittest.main(verbosity=2)
| 30.853448
| 96
| 0.54736
|
fbc8de86c0b7d5e87060563fe21579c1924ae928
| 4,692
|
py
|
Python
|
scripts/physics/atom.py
|
Hyperengined/Python-for-Numworks
|
16a72e946dacc764b825a3a2efbc97a4e5cf029e
|
[
"MIT"
] | 5
|
2020-11-21T11:25:40.000Z
|
2021-05-27T08:27:54.000Z
|
scripts/physics/atom.py
|
Hyperengined/Python-for-Numworks
|
16a72e946dacc764b825a3a2efbc97a4e5cf029e
|
[
"MIT"
] | 6
|
2021-03-12T17:14:16.000Z
|
2021-10-31T10:37:17.000Z
|
scripts/physics/atom.py
|
Hyperengined/Python-for-Numworks
|
16a72e946dacc764b825a3a2efbc97a4e5cf029e
|
[
"MIT"
] | 6
|
2021-04-09T09:26:32.000Z
|
2021-10-31T10:19:19.000Z
|
def atom(x):
resp = ""
if x == 1:
resp = "Hydrogène [H]\nZ = 1\nM(H) = 1,008g/mol\nStructure électronique :\n1s1"
elif x == 2:
resp = "Hélium [He]\nZ = 2\nM(He) = 4,0026g/mol\nStructure électronique :\n1s2"
elif x == 3:
resp = "Lithium [Li]\nZ = 3\nM(Li) = 6,94g/mol\nStructure électronique :\n1s2 2s1"
elif x == 4:
resp = "Béryllium [Be]\nZ = 4\nM(Be) = 9,0122g/mol\nStructure électronique :\n1s2 2s2"
elif x == 5:
resp = "Bore [B]\nZ = 5\nM(B) = 10,81g/mol\nStructure électronique :\n1s2 2s2 2p1"
elif x == 6:
resp = "Carbone [C]\nZ = 6\nM(C) = 12,011g/mol\nStructure électronique :\n1s2 2s2 2p2"
elif x == 7:
resp = "Azote [N]\nZ = 7\nM(N) = 14,007g/mol\nStructure électronique :\n1s2 2s2 2p3"
elif x == 8:
resp = "Oxygène [O]\nZ = 8\nM(O) = 15,999g/mol\nStructure électronique :\n1s2 2s2 2p4"
elif x == 9:
resp = "Fluor [F]\nZ = 9\nM(F) = 18,998g/mol\nStructure électronique :\n1s2 2s2 2p5"
elif x == 10:
resp = "Néon [Ne]\nZ = 10\nM(Ne) = 20,180g/mol\nStructure électronique :\n1s2 2s2 2p6"
elif x == 11:
resp = "Sodium [Na]\nZ = 11\nM(Na) = 22,990g/mol\nStructure électronique :\n1s2 2s2 2p6 3s1"
elif x == 12:
resp = "Magnésium [Mg]\nZ = 12\nM(Mg) = 24,305g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2"
elif x == 13:
resp = "Aluminium [Al]\nZ = 13\nM(Al) = 26,982g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p1"
elif x == 14:
resp = "Silicium [Si]\nZ = 14\nM(Si) = 28,085g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p2"
elif x == 15:
resp = "Phosphore [P]\nZ = 15\nM(P) = 30,974g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p3"
elif x == 16:
resp = "Soufre [F]\nZ = 16\nM(F) = 32,06g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p4"
elif x == 17:
resp = "Chlore [Cl]\nZ = 17\nM(Cl) = 35,45g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p5"
elif x == 18:
resp = "Argon [Ar]\nZ = 18\nM(Ar) = 39,948g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6"
elif x == 19:
resp = "Potassium [K]\nZ = 19\nM(K) = 39,098g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s1"
elif x == 20:
resp = "Calcium [Ca]\nZ = 20\nM(Ca) = 40,078g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2"
elif x == 21:
resp = "Scandium [Sc]\nZ = 21\nM(Sc) = 44,956g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d1"
elif x == 22:
resp = "Titane [Ti]\nZ = 22\nM(Ti) = 47,867g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d2"
elif x == 23:
resp = "Vanadium [V]\nZ = 23\nM(V) = 50,942g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d3"
elif x == 24:
resp = "Chrome [Cr]\nZ = 24\nM(Cr) = 51,996g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s1 3d5"
elif x == 25:
resp = "Manganèse [Mn]\nZ = 25\nM(Mn) = 54,938g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d5"
elif x == 26:
resp = "Fer [Fe]\nZ = 26\nM(Fe) = 55,845g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d6"
elif x == 27:
resp = "Cobalt [Co]\nZ = 27\nM(Co) = 58,933g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d7"
elif x == 28:
resp = "Nickel [Ni]\nZ = 28\nM(Ni) = 58,693g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d8\nou 1s2 2s2 2p6 3s2 3p6 4s1 3d9"
elif x == 29:
resp = "Cuivre [Cu]\nZ = 29\nM(Cu) = 63,546g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s1 3d10"
elif x == 30:
resp = "Zinc [Zn]\nZ = 30\nM(Zn) = 65,38g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d10"
elif x == 31:
resp = "Gallium [Ga]\nZ = 31\nM(Ga) = 69,723g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d10 4p1"
elif x == 32:
resp = "Germanium [Ge]\nZ = 32\nM(Ge) = 72,630g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d10 4p2"
elif x == 33:
resp = "Arsenic [As]\nZ = 33\nM(As) = 74,922g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d10 4p3"
elif x == 34:
resp = "Sélénium [Se]\nZ = 34\nM(Se) = 78,971g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d10 4p4"
elif x == 35:
resp = "Brome [Br]\nZ = 35\nM(Br) = 79,904g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d10 4p5"
elif x == 36:
resp = "Krypton [Kr]\nZ = 36\nM(Kr) = 83,798g/mol\nStructure électronique :\n1s2 2s2 2p6 3s2 3p6 4s2 3d10 4p6"
else:
resp = "aled"
return resp
print(atom(int(input("atome Z = "))))
| 58.65
| 145
| 0.58994
|
8c86169dcaffae03ccf2dac87edaa7664915da4d
| 9,430
|
py
|
Python
|
submission_criteria/concordance.py
|
CSP197/submission-criteria
|
3a584ca4ac452b429651bff7266c52db560f6c7b
|
[
"Apache-2.0"
] | null | null | null |
submission_criteria/concordance.py
|
CSP197/submission-criteria
|
3a584ca4ac452b429651bff7266c52db560f6c7b
|
[
"Apache-2.0"
] | null | null | null |
submission_criteria/concordance.py
|
CSP197/submission-criteria
|
3a584ca4ac452b429651bff7266c52db560f6c7b
|
[
"Apache-2.0"
] | null | null | null |
# System
"""Concordance Checking."""
import logging
import os
import functools
# Third Party
from sklearn.cluster import MiniBatchKMeans
from scipy.stats import ks_2samp
import numpy as np
import pandas as pd
# First Party
from submission_criteria import common
def has_concordance(P1, P2, P3, c1, c2, c3, threshold=0.12):
"""Checks that the clustered submission data conforms to a concordance threshold
    Parameters:
----------
P1 : ndarray
Sorted validation submission probabilities based on the id
P2 : ndarray
Sorted test submission probabilities based on the id
P3 : ndarray
Sorted live submission probabilities based on the id
c1 : ndarray
Clustered validation from the tournament data
c2 : ndarray
Clustered test from the tournament data
c3 : ndarray
Clustered live from the tournament data
threshold : float, optional, default: 0.12
        The value that the mean KS score must stay below for the submission to be considered concordant
Returns:
--------
concordance : bool
Boolean value of the clustered submission data having concordance
"""
ks = []
for i in set(c1):
ks_score = max(
ks_2samp(P1.reshape(-1)[c1 == i],
P2.reshape(-1)[c2 == i])[0],
ks_2samp(P1.reshape(-1)[c1 == i],
P3.reshape(-1)[c3 == i])[0],
ks_2samp(P3.reshape(-1)[c3 == i],
P2.reshape(-1)[c2 == i])[0])
ks.append(ks_score)
logging.getLogger().info("Noticed score {}".format(np.mean(ks)))
return np.mean(ks) < threshold
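# Illustrative sketch, not part of the original module: a tiny self-check showing how the
# KS-based check above behaves when validation, test, and live predictions come from the
# same distribution. The sample sizes, seed, and helper name are arbitrary assumptions.
def _concordance_smoke_test(n_per_cluster=1000, n_clusters=3, seed=0):
    """Return True when identically distributed synthetic splits pass has_concordance."""
    rng = np.random.RandomState(seed)
    cluster_labels = np.repeat(np.arange(n_clusters), n_per_cluster)
    p1, p2, p3 = (rng.uniform(size=cluster_labels.size) for _ in range(3))
    # With large identically distributed samples the per-cluster KS distances stay small,
    # so their mean falls well under the 0.12 default threshold.
    return has_concordance(p1, p2, p3, cluster_labels, cluster_labels, cluster_labels)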
def make_clusters(X, X_1, X_2, X_3):
"""Split submission data into 3 clusters using K-Means clustering
Parameters:
-----------
X: ndarray
tournament data for the competition round
X_1: ndarray
sorted validation data ids from tournament data
X_2: ndarray
sorted test ids data from tournament data
X_3: ndarray
sorted live ids data from tournament data
Returns:
--------
    c1: ndarray
        Cluster labels for the validation data
    c2: ndarray
        Cluster labels for the test data
    c3: ndarray
        Cluster labels for the live data
"""
logging.getLogger().info("New competition, clustering dataset")
kmeans = MiniBatchKMeans(n_clusters=5, random_state=1337)
kmeans.fit(X)
c1, c2, c3 = kmeans.predict(X_1), kmeans.predict(X_2), kmeans.predict(X_3)
logging.getLogger().info("Finished clustering")
return c1, c2, c3
@functools.lru_cache(maxsize=2)
def get_ids(filemanager, tournament_number, round_number):
"""Gets the ids from submission data based on the round_number
Parameters:
-----------
filemanager : FileManager
S3 Bucket data access object for querying competition datasets
    tournament_number : int
        The numerical id of the tournament
    round_number : int
        The numerical id of the competition round
Returns:
--------
val : list
List of all ids in the 'validation' dataset
test : list
List of all ids in the 'test' dataset
live : list
List of all ids in the 'live' dataset
"""
extract_dir = filemanager.download_dataset(tournament_number, round_number)
tournament = pd.read_csv(
os.path.join(extract_dir, "numerai_tournament_data.csv"))
val = tournament[tournament["data_type"] == "validation"]
test = tournament[tournament["data_type"] == "test"]
live = tournament[tournament["data_type"] == "live"]
return list(val["id"]), list(test["id"]), list(live["id"])
def get_sorted_split(data, val_ids, test_ids, live_ids):
"""Split the competition data into validation, test, and live data sets in a sorted fashion
Parameters:
-----------
data : DataFrame
Tournament data for the competition round
val_ids : list
List of all validation data ids
test_ids : list
List of all test data ids
live_ids : list
List of all live data ids
Returns:
--------
validation : ndarray
Validation data features sorted by id
test : ndarray
Test data features sorted by id
live : ndarray
Live data features sorted by id
"""
validation = data[data["id"].isin(val_ids)]
test = data[data["id"].isin(test_ids)]
live = data[data["id"].isin(live_ids)]
validation = validation.sort_values("id")
test = test.sort_values("id")
live = live.sort_values("id")
if any(["feature" in c for c in list(validation)]):
f = [c for c in list(validation) if "feature" in c]
else:
f = ["probability"]
validation = validation[f]
test = test[f]
live = live[f]
return validation.as_matrix(), test.as_matrix(), live.as_matrix()
@functools.lru_cache(maxsize=2)
def get_competition_variables(tournament_number, round_number, filemanager):
"""Return the K-Means Clustered tournament data for the competition round
Parameters:
-----------
    tournament_number : int
        The numerical id of the tournament
    round_number : int
        The numerical id of the competition round
filemanager : FileManager
S3 Bucket data access object for querying competition datasets
Returns:
--------
variables : dictionary
Holds clustered tournament data and the round_number
"""
extract_dir = filemanager.download_dataset(tournament_number, round_number)
training = pd.read_csv(
os.path.join(extract_dir, "numerai_training_data.csv"))
tournament = pd.read_csv(
os.path.join(extract_dir, "numerai_tournament_data.csv"))
val_ids, test_ids, live_ids = get_ids(filemanager, tournament_number,
round_number)
return get_competition_variables_from_df(
round_number, training, tournament, val_ids, test_ids, live_ids)
def get_competition_variables_from_df(
round_number: str, training: pd.DataFrame, tournament: pd.DataFrame,
val_ids: list, test_ids: list, live_ids: list) -> dict:
f = [c for c in list(tournament) if "feature" in c]
# TODO the dropna is a hack workaround for https://github.com/numerai/api-ml/issues/68
X = training[f].dropna().as_matrix()
X = np.append(X, tournament[f].as_matrix(), axis=0)
X_1, X_2, X_3 = get_sorted_split(tournament, val_ids, test_ids, live_ids)
c1, c2, c3 = make_clusters(X, X_1, X_2, X_3)
variables = {
"round_number": round_number,
"cluster_1": c1,
"cluster_2": c2,
"cluster_3": c3,
}
return variables
def get_submission_pieces(submission_id, tournament, round_number, db_manager,
filemanager):
"""Get validation, test, and live ids sorted from submission_id
Parameters:
-----------
submission_id : string
ID of the submission
round_number : int
Numerical ID of the competition round of the tournament
db_manager : DatabaseManager
DB data access object that has read and write functions to NoSQL DB
filemanager : FileManager
S3 Bucket data access object for querying competition datasets
Returns:
--------
validation : ndarray
Sorted validation ids from submission data
tests : ndarray
Sorted test ids from submission data
live : ndarray
Sorted live ids from submission data
"""
s3_file, _ = common.get_filename(db_manager.postgres_db, submission_id)
data = filemanager.read_csv(s3_file)
val_ids, test_ids, live_ids = get_ids(filemanager, tournament,
round_number)
validation, tests, live = get_sorted_split(data, val_ids, test_ids,
live_ids)
return validation, tests, live
def submission_concordance(submission, db_manager, filemanager):
"""Determine if a submission is concordant and write the result to DB
Parameters:
-----------
submission : dictionary
Submission data that holds the ids of submission and competition round
db_manager : DatabaseManager
DB data access object that has read and write functions to NoSQL DB
filemanager : FileManager
S3 Bucket data access object for querying competition datasets
"""
tournament, round_number, _dataset_path = common.get_round(
db_manager.postgres_db, submission["submission_id"])
clusters = get_competition_variables(tournament, round_number, filemanager)
P1, P2, P3 = get_submission_pieces(submission["submission_id"], tournament,
round_number, db_manager, filemanager)
c1, c2, c3 = clusters["cluster_1"], clusters["cluster_2"], clusters[
"cluster_3"]
try:
concordance = has_concordance(P1, P2, P3, c1, c2, c3)
except IndexError:
        # An indexing error means the round restarted, so clear the cache and fetch the new competition variables.
get_competition_variables.cache_clear()
clusters = get_competition_variables(tournament, round_number,
filemanager)
c1, c2, c3 = clusters["cluster_1"], clusters["cluster_2"], clusters[
"cluster_3"]
concordance = has_concordance(P1, P2, P3, c1, c2, c3)
db_manager.write_concordance(submission['submission_id'], concordance)
| 30.816993
| 131
| 0.649523
|
e36cf53d6a21939614e75e6301de0d1b20ec7305
| 9,786
|
py
|
Python
|
myprojectenv/lib/python3.5/site-packages/ansible/modules/storage/netapp/netapp_e_auth.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
myprojectenv/lib/python3.5/site-packages/ansible/modules/storage/netapp/netapp_e_auth.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
myprojectenv/lib/python3.5/site-packages/ansible/modules/storage/netapp/netapp_e_auth.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_auth
short_description: Sets or updates the password for a storage array.
description:
- Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
    Services proxy. Note that not all storage arrays have a Monitor or RO role.
version_added: "2.2"
author: Kevin Hulquest (@hulquest)
options:
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
name:
description:
- The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
the ID instead.
required: False
ssid:
description:
      - The identifier of the storage array in the Web Services Proxy.
required: False
set_admin:
description:
- Boolean value on whether to update the admin password. If set to false then the RO account is updated.
default: False
current_password:
description:
- The current admin password. This is not required if the password hasn't been set before.
required: False
new_password:
description:
- The password you would like to set. Cannot be more than 30 characters.
required: True
api_url:
description:
- The full API url.
- "Example: http://ENDPOINT:8080/devmgr/v2"
- This can optionally be set via an environment variable, API_URL
required: False
api_username:
description:
- The username used to authenticate against the API
- This can optionally be set via an environment variable, API_USERNAME
required: False
api_password:
description:
- The password used to authenticate against the API
- This can optionally be set via an environment variable, API_PASSWORD
required: False
'''
EXAMPLES = '''
- name: Test module
netapp_e_auth:
name: trex
current_password: OldPasswd
new_password: NewPasswd
set_admin: yes
api_url: '{{ netapp_api_url }}'
api_username: '{{ netapp_api_username }}'
api_password: '{{ netapp_api_password }}'
'''
RETURN = '''
msg:
description: Success message
returned: success
type: string
sample: "Password Updated Successfully"
'''
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
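# Illustrative sketch, not part of the original module: how the request() helper above can
# be called directly to list storage systems on a SANtricity Web Services proxy. The
# endpoint URL, credentials, and helper name are placeholders, not values from this module.
def _example_list_storage_systems(api_url='http://localhost:8080/devmgr/v2/',
                                  user='admin', pwd='admin'):
    """Return (status_code, parsed JSON) for the proxy's storage-systems endpoint."""
    return request(api_url + 'storage-systems', headers=HEADERS,
                   url_username=user, url_password=pwd)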
def get_ssid(module, name, api_url, user, pwd):
count = 0
all_systems = 'storage-systems'
systems_url = api_url + all_systems
rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd)
for system in data:
if system['name'] == name:
count += 1
if count > 1:
module.fail_json(
msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
"Use the id instead")
else:
ssid = system['id']
else:
continue
if count == 0:
module.fail_json(msg="No storage array with the name %s was found" % name)
else:
return ssid
def get_pwd_status(module, ssid, api_url, user, pwd):
pwd_status = "storage-systems/%s/passwords" % ssid
url = api_url + pwd_status
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
return data['readOnlyPasswordSet'], data['adminPasswordSet']
except HTTPError:
error = get_exception()
module.fail_json(msg="There was an issue with connecting, please check that your "
"endpoint is properly defined and your credentials are correct: %s" % str(error))
def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
update_pwd = 'storage-systems/%s' % ssid
url = api_url + update_pwd
post_body = json.dumps(dict(storedPassword=pwd))
try:
rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
url_password=api_pwd)
except:
err = get_exception()
module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, str(err)))
return data
def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
set_pass = "storage-systems/%s/passwords" % ssid
url = api_url + set_pass
if not current_password:
current_password = ""
post_body = json.dumps(
dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))
try:
rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
ignore_errors=True)
except:
err = get_exception()
module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, str(err)))
if rc == 422:
post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
try:
rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd)
except Exception:
module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")
update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
if int(rc) == 204:
return update_data
else:
module.fail_json(msg="%s:%s" % (rc, data))
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=False, type='str'),
ssid=dict(required=False, type='str'),
current_password=dict(required=False, no_log=True),
new_password=dict(required=True, no_log=True),
set_admin=dict(required=True, type='bool'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True)
)
)
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
required_one_of=[['name', 'ssid']])
name = module.params['name']
ssid = module.params['ssid']
current_password = module.params['current_password']
new_password = module.params['new_password']
set_admin = module.params['set_admin']
user = module.params['api_username']
pwd = module.params['api_password']
api_url = module.params['api_url']
if not api_url.endswith('/'):
api_url += '/'
if name:
ssid = get_ssid(module, name, api_url, user, pwd)
ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
if admin_pwd and not current_password:
module.fail_json(
msg="Admin account has a password set. " +
"You must supply current_password in order to update the RO or Admin passwords")
if len(new_password) > 30:
module.fail_json(msg="Passwords must not be greater than 30 characters in length")
success = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
new_password=new_password,
set_admin=set_admin)
module.exit_json(changed=True, msg="Password Updated Successfully", **success)
if __name__ == '__main__':
main()
| 35.32852
| 151
| 0.65839
|
fcd9e99d1440d1fc5264c6005393ba9e9669e7ea
| 41,183
|
py
|
Python
|
python/tvm/relay/op/nn/_nn.py
|
ANSHUMAN87/incubator-tvm
|
902e21bd4e975037020c3b6445e4c903c2490a22
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 3
|
2021-02-23T22:06:01.000Z
|
2021-09-30T09:59:17.000Z
|
python/tvm/relay/op/nn/_nn.py
|
ANSHUMAN87/incubator-tvm
|
902e21bd4e975037020c3b6445e4c903c2490a22
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
python/tvm/relay/op/nn/_nn.py
|
ANSHUMAN87/incubator-tvm
|
902e21bd4e975037020c3b6445e4c903c2490a22
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""
from __future__ import absolute_import
import topi
from topi.util import get_const_tuple
from .. import op as reg
from ..op import OpPattern, schedule_injective
from .._tensor import elemwise_shape_func
from ....api import convert
from ....hybrid import script
# relu
reg.register_schedule("nn.relu", schedule_injective)
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
# softmax
@reg.register_schedule("nn.softmax")
def schedule_softmax(_, outputs, target):
"""Schedule definition of softmax"""
with target:
return topi.generic.schedule_softmax(outputs)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
schedule_broadcast = schedule_injective
@reg.register_schedule("nn.log_softmax")
def schedule_log_softmax(_, outputs, target):
"""Schedule definition of log_softmax"""
with target:
return topi.generic.schedule_softmax(outputs)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
# dense
@reg.register_compute("nn.dense")
def compute_dense(attrs, inputs, out_type, target):
"""Compute definition of dense"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
return [topi.nn.dense(inputs[0], inputs[1], None, out_dtype)]
@reg.register_schedule("nn.dense")
def schedule_dense(attrs, outputs, target):
"""Schedule definition of dense"""
with target:
return topi.generic.schedule_dense(outputs)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute('nn.fifo_buffer')
def compute_fifo_buffer(attrs, inputs, out_type, target):
return [topi.nn.fifo_buffer(inputs[0], inputs[1], axis=attrs.get_int('axis'))]
@reg.register_schedule('nn.fifo_buffer')
def schedule_fifo_buffer(attrs, outputs, target):
with target:
return topi.generic.schedule_injective(outputs)
reg.register_pattern("nn.fifo_buffer", OpPattern.OPAQUE)
# batch_matmul
@reg.register_compute("nn.batch_matmul")
def compute_batch_matmul(attrs, inputs, out_type, target):
"""Compute definition of batch_matmul"""
with target:
return [topi.nn.batch_matmul(inputs[0], inputs[1])]
@reg.register_schedule("nn.batch_matmul")
def schedule_batch_matmul(attrs, outputs, target):
"""Schedule definition of batch_matmul"""
with target:
return topi.generic.schedule_batch_matmul(outputs)
reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_dense
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type, target):
"""Compute definition of sparse_dense"""
return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3])]
@reg.register_schedule("nn.sparse_dense")
def schedule_sparse_dense(attrs, outputs, target):
"""Schedule definition of batch_matmul"""
with target:
return topi.generic.schedule_sparse_dense(outputs)
reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_transpose
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type, target):
"""Compute definition of sparse_transpose"""
return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2])
@reg.register_schedule("nn.sparse_transpose")
def schedule_sparse_transpose(attrs, outputs, target):
"""Schedule definition of batch_matmul"""
with target:
return topi.generic.schedule_sparse_transpose(outputs)
reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# Conv1D
@reg.register_compute("nn.conv1d")
def compute_conv1d(attrs, inputs, out_type, target):
"""Compute definition of conv1d"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout in ["NCW", "NWC"]
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
return [topi.nn.conv1d(inputs[0], inputs[1], strides, padding, dilation, layout, out_dtype)]
@reg.register_schedule("nn.conv1d")
def schedule_conv1d(attrs, outs, target):
"""Schedule definition of conv1d"""
layout = attrs.data_layout
with target:
if layout == "NCW":
return topi.generic.schedule_conv1d_ncw(outs)
elif layout == "NCW":
return topi.generic.schedule_conv1d_nwc(outs)
raise ValueError("No compatible schedule")
reg.register_pattern("nn.conv1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d
def _find_conv2d_op(op):
"""Find the op with conv2d in its tag by traversing."""
if 'conv2d' in op.tag:
return op
for tensor in op.input_tensors:
op_ = _find_conv2d_op(tensor.op)
if op_ is not None:
return op_
return None
@reg.register_compute("nn.conv2d")
def compute_conv2d(attrs, inputs, out_type, target):
"""Compute definition of conv2d"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout in ["NCHW", "NHWC", "NCHW4c", "HWCN"]
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
def _get_out_depth():
weight_shape = get_const_tuple(inputs[1].shape)
# NHWC layout
if kernel_layout.startswith("HW"):
return weight_shape[2] * weight_shape[3]
# NCHW layout.
# in ARM CPU contrib_spatial_pack schedule, we will prepack weight layout
if len(weight_shape) == 4:
return weight_shape[0] * weight_shape[1]
else:
assert len(weight_shape) == 5
C, M, _, _, VC = weight_shape
return C * VC * M
if groups == 1:
out = topi.nn.conv2d(
inputs[0], inputs[1], strides, padding,
dilation, layout, out_dtype)
elif layout == "NCHW" and _get_out_depth() == groups:
out = topi.nn.depthwise_conv2d_nchw(
inputs[0], inputs[1], strides, padding, dilation, out_dtype)
elif layout == "NHWC" and kernel_layout == "HWOI" and _get_out_depth() == groups:
out = topi.nn.depthwise_conv2d_nhwc(
inputs[0], inputs[1], strides, padding, dilation, out_dtype)
elif layout in ['NCHW', 'NCHW4c']:
out = topi.nn.group_conv2d_nchw(inputs[0], inputs[1], strides, padding, dilation, groups,
out_dtype)
else:
raise ValueError("not support arbitrary group number for now")
return [out]
@reg.register_schedule("nn.conv2d")
def schedule_conv2d(attrs, outs, target):
"""Schedule definition of conv2d"""
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
with target:
if groups == 1 and layout == "NCHW":
return topi.generic.schedule_conv2d_nchw(outs)
elif groups == 1 and layout == "NCHW4c":
return topi.generic.schedule_conv2d_nchw(outs)
elif groups == 1 and layout == "NHWC":
return topi.generic.schedule_conv2d_nhwc(outs)
elif groups == 1 and layout == "HWCN":
return topi.generic.schedule_conv2d_hwcn(outs)
elif groups != 1:
# collect in_channels to distinguish depthwise and group conv2d
op = _find_conv2d_op(outs[0].op)
assert op is not None
is_depthwise = 'depthwise' in op.tag
if is_depthwise:
if layout == "NCHW":
# TODO(leyuan, merrymercy, Huyuwei): fold depthwise topi into conv2d.
return topi.generic.schedule_depthwise_conv2d_nchw(outs)
if layout == "NHWC" and kernel_layout == "HWOI":
return topi.generic.schedule_depthwise_conv2d_nhwc(outs)
else:
if layout in ["NCHW", "NCHW4c"]:
return topi.generic.schedule_group_conv2d_nchw(outs)
raise ValueError("No compatible schedule")
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos):
"""Alternate the layout of conv2d"""
# pylint: disable=import-outside-toplevel
from ... import op
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, op)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
"""Legalize conv2d op.
Parameters
----------
attrs : tvm.attrs.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d")
def convert_conv2d(attrs, inputs, tinfos, desired_layout):
"""Convert Layout pass registration for conv2d op.
Parameters
----------
attrs : tvm.attrs.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layout : str
The desired layout
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data_layout = attrs['data_layout']
kernel_layout = attrs['kernel_layout']
data, weight = inputs
assert desired_layout == 'NCHW', \
"Currently only transformation to NCHW layout is supported."
if desired_layout == 'NCHW':
new_attrs = dict(attrs)
new_attrs['data_layout'] = desired_layout
new_attrs['kernel_layout'] = 'OIHW'
if data_layout == 'NHWC' and kernel_layout == 'HWIO':
# Convert (NHWC, HWIO) to (NCHW, OIHW)
return relay.nn.conv2d(data, weight, **new_attrs)
if data_layout == 'NHWC' and kernel_layout == 'HWOI':
# Convert (NHWC, HWOI) to (NCHW, OIHW). Depthwise conv2d.
return relay.nn.conv2d(data, weight, **new_attrs)
return None
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d_transpose
@reg.register_compute("nn.conv2d_transpose")
def compute_conv2d_transpose(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
out = topi.nn.conv2d_transpose_nchw(
inputs[0], inputs[1], strides, padding, out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi.nn.pad(out,
[0, 0, 0, 0], [0, 0, output_padding[0], output_padding[1]])
return [out]
@reg.register_compute("nn.conv3d")
def compute_conv3d(attrs, inputs, out_type, target):
"""Compute definition of conv3d"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout in ["NCDHW", "NDHWC"]
(dilation_d, dilation_h, dilation_w) = dilation
if dilation_d < 1 or dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
out = topi.nn.conv3d(
inputs[0], inputs[1], strides, padding,
dilation, layout, out_dtype)
else:
raise ValueError("not support arbitrary group number for now")
return [out]
@reg.register_schedule("nn.conv3d")
def schedule_conv3d(attrs, outs, target):
"""Schedule definition of conv3d"""
groups = attrs.groups
layout = attrs.data_layout
with target:
if groups == 1 and layout == "NCDHW":
return topi.generic.schedule_conv3d_ncdhw(outs)
elif groups == 1 and layout == "NDHWC":
return topi.generic.schedule_conv3d_ndhwc(outs)
raise ValueError("No compatible schedule")
reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_schedule("nn.conv2d_transpose")
def schedule_conv2d_transpose(attrs, outs, target):
"""Schedule definition of conv2d_transpose"""
with target:
return topi.generic.schedule_conv2d_transpose_nchw(outs)
@reg.register_legalize("nn.conv2d_transpose")
def legalize_conv2d_transpose(attrs, inputs, types):
"""Legalize conv2d_transpose op.
Parameters
----------
attrs : tvm.attrs.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_transpose_legalize(attrs, inputs, types)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d_transpose
@reg.register_compute("nn.conv1d_transpose")
def compute_conv1d_transpose(attrs, inputs, out_dtype, target):
"""Compute definition of conv1d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
out = topi.nn.conv1d_transpose_ncw(
inputs[0], inputs[1], strides, padding, out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi.nn.pad(out,
[0, 0, 0], [0, 0, output_padding[0]])
return [out]
@reg.register_schedule("nn.conv1d_transpose")
def schedule_conv1d_transpose(attrs, outs, target):
"""Schedule definition of conv1d_transpose"""
with target:
return topi.generic.schedule_conv1d_transpose_ncw(outs)
reg.register_pattern("nn.conv1d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
reg.register_schedule("nn.bias_add", schedule_injective)
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool1d
@reg.register_schedule("nn.max_pool1d")
def schedule_max_pool1d(attrs, outs, target):
"""Schedule definition of max_pool1d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d
@reg.register_schedule("nn.max_pool2d")
def schedule_max_pool2d(attrs, outs, target):
"""Schedule definition of max_pool2d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool3d
@reg.register_schedule("nn.max_pool3d")
def schedule_max_pool3d(attrs, outs, target):
"""Schedule definition of max_pool3d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool1d
@reg.register_schedule("nn.avg_pool1d")
def schedule_avg_pool1d(attrs, outs, target):
"""Schedule definition of avg_pool1d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
@reg.register_schedule("nn.avg_pool2d")
def schedule_avg_pool2d(attrs, outs, target):
"""Schedule definition of avg_pool2d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool3d
@reg.register_schedule("nn.avg_pool3d")
def schedule_avg_pool3d(attrs, outs, target):
"""Schedule definition of avg_pool3d"""
layout = attrs.layout
with target:
return topi.generic.schedule_pool(outs, layout)
reg.register_pattern("nn.avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d_grad
@reg.register_schedule("nn.max_pool2d_grad")
def schedule_max_pool2d_grad(attrs, outs, target):
"""Schedule definition of max_pool2d_grad"""
with target:
return topi.generic.schedule_pool_grad(outs)
reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d_grad
@reg.register_schedule("nn.avg_pool2d_grad")
def schedule_avg_pool2d_grad(attrs, outs, target):
"""Schedule definition of avg_pool2d_grad"""
with target:
return topi.generic.schedule_pool_grad(outs)
reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
@reg.register_schedule("nn.global_max_pool2d")
def schedule_global_max_pool2d(_, outs, target):
"""Schedule definition of global_max_pool2d"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
@reg.register_schedule("nn.global_avg_pool2d")
def schedule_global_avg_pool2d(_, outs, target):
"""Schedule definition of global_avg_pool2d"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_schedule("nn.leaky_relu", schedule_broadcast)
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_schedule("nn.prelu", schedule_broadcast)
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_schedule("nn.batch_flatten", schedule_broadcast)
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype, target):
"""Compute definition of lrn"""
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis,
attrs.alpha, attrs.beta, attrs.bias)]
@reg.register_schedule("nn.lrn")
def schedule_lrn(attrs, outs, target):
"""Schedule definition of lrn"""
with target:
return topi.generic.schedule_lrn(outs)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# upsampling
reg.register_schedule("nn.upsampling", reg.schedule_injective)
def schedule_upsampling(_, outs, target):
"""Schedule definition of upsampling"""
with target:
return topi.generic.schedule_injective(outs)
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype, target):
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(inputs[0], scale_h, scale_w, layout, method, align_corners)]
# upsampling3d
reg.register_schedule("nn.upsampling3d", reg.schedule_injective)
def schedule_upsampling3d(_, outs, target):
"""Schedule definition of upsampling3d"""
with target:
return topi.generic.schedule_injective(outs)
@reg.register_compute("nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype, target):
scale_d = attrs.scale_d
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
coordinate_transformation_mode = attrs.coordinate_transformation_mode
return [topi.nn.upsampling3d(inputs[0], scale_d, scale_h, scale_w, layout, method,\
coordinate_transformation_mode)]
# pad
reg.register_schedule("nn.pad", schedule_broadcast)
# mirror_pad
reg.register_schedule("nn.mirror_pad", schedule_broadcast)
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype, target):
pad_before, pad_after = list(zip(*attrs.pad_width))
mode = attrs.mode
out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
return [out]
# winograd related operators
@reg.register_compute("nn.contrib_conv2d_winograd_without_weight_transform")
def compute_contrib_conv2d_winograd_without_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_winograd_without_weight_transform"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.get_str("out_dtype")
tile_size = attrs.get_int("tile_size")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
assert dilation == (1, 1), "Do not support dilate now"
assert groups == 1, "Do not supoort arbitrary group number"
out = topi.nn.conv2d_winograd_without_weight_transform(
inputs[0], inputs[1], strides, padding, dilation, data_layout,
out_dtype, tile_size)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_without_weight_transform")
def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target):
"""Schedule definition of conv2d_winograd_without_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_without_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_without_weight_transform",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of contrib_conv2d_winograd_weight_transform"""
out = topi.nn.conv2d_winograd_weight_transform(
inputs[0], attrs.get_int('tile_size'))
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_weight_transform")
def schedule_contrib_conv2d_winograd_weight_transform(attrs, outs, target):
"""Schedule definition of contrib_conv2d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform",
OpPattern.OUT_ELEMWISE_FUSABLE)
# winograd nnpack related operators
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_without_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_without_weight_transform(
attrs, inputs, out_dtype, target):
"""Compute definition of conv2d_winograd_nnpack_without_weight_transform"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
assert dilation == (1, 1), "Do not support dilate now"
assert groups == 1, "Do not supoort arbitrary group number"
# No bias
out = topi.nn.conv2d_winograd_nnpack_without_weight_transform(
inputs[0], inputs[1], None, strides, padding, dilation, data_layout,
out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_without_weight_transform")
def schedule_contrib_conv2d_winograd_nnpack_without_weight_transform(attrs, outs, target):
"""Schedule definition of conv2d_winograd_nnpack_without_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_without_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_without_weight_transform",
OpPattern.OPAQUE)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype, target):
"""Compute definition of contrib_conv2d_winograd_nnpack_weight_transform"""
convolution_algorithm = attrs.get_int('convolution_algorithm')
out = topi.nn.conv2d_winograd_nnpack_weight_transform(
inputs[0], convolution_algorithm, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def schedule_contrib_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
"""Schedule definition of contrib_conv2d_winograd_nnpack_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform",
OpPattern.OPAQUE)
@reg.register_compute("nn.contrib_conv2d_NCHWc")
def compute_contrib_conv2d_NCHWc(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d NCHWc"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_NCHWc")
def schedule_contrib_conv2d_NCHWc(attrs, outs, target):
"""Schedule definition of contrib_conv2d_NCHWc"""
with target:
return topi.generic.schedule_conv2d_NCHWc(outs)
reg.register_pattern("nn.contrib_conv2d_NCHWc",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_NCHWc_int8")
def compute_contrib_conv2d_NCHWc_int8(attrs, inputs, out_dtype, target):
"""Compute definition of conv2d NCHWc"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.conv2d_NCHWc_int8(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_conv2d_NCHWc_int8")
def schedule_contrib_conv2d_NCHWc_int8(attrs, outs, target):
"""Schedule definition of contrib_conv2d_NCHWc_int8"""
with target:
return topi.generic.schedule_conv2d_NCHWc_int8(outs)
reg.register_pattern("nn.contrib_conv2d_NCHWc_int8",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_depthwise_conv2d_NCHWc")
def compute_contrib_depthwise_conv2d_NCHWc(attrs, inputs, out_dtype, target):
"""Compute definition of depthwise conv2d NCHWc"""
# pylint: disable=assignment-from-no-return
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
out = topi.nn.depthwise_conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation,
data_layout, out_layout, out_dtype)
return [out]
@reg.register_schedule("nn.contrib_depthwise_conv2d_NCHWc")
def schedule_contrib_depthwise_conv2d_NCHWc(attrs, outs, target):
"""Schedule definition of contrib_conv2d_NCHWc"""
with target:
return topi.generic.schedule_depthwise_conv2d_NCHWc(outs)
reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc",
OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.deformable_conv2d")
def compute_deformable_conv2d(attrs, inputs, out_dtype, target):
"""Compute definition of deformable_conv2d"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
with target:
out = topi.nn.deformable_conv2d_nchw(inputs[0], inputs[1], inputs[2], strides, padding,
dilation, deformable_groups, groups, out_dtype)
return [out]
@reg.register_schedule("nn.deformable_conv2d")
def schedule_deformable_conv2d(attrs, outs, target):
"""Schedule definition of deformable_conv2d"""
with target:
return topi.generic.schedule_deformable_conv2d_nchw(outs)
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype, target):
"""Compute definition for bitpack"""
bits = attrs.bits
pack_axis = attrs.pack_axis
bit_axis = attrs.bit_axis
pack_type = attrs.pack_type
name = attrs.name
with target:
out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type,
name)
return [out]
@reg.register_schedule("nn.bitpack")
def schedule_bitpack(attrs, outs, target):
with target:
return topi.generic.schedule_bitpack(outs)
reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE)
@reg.register_compute("nn.bitserial_conv2d")
def compute_bitserial_conv2d(attrs, inputs, out_dtype, target):
"""Compute definition for bitserial conv2d."""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
layout = attrs.data_layout
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
if layout == 'NCHW':
with target:
out = topi.nn.bitserial_conv2d_nchw(
inputs[0], inputs[1], strides, padding, activation_bits,
weight_bits, pack_dtype, out_dtype, unipolar)
elif layout == 'NHWC':
with target:
out = topi.nn.bitserial_conv2d_nhwc(
inputs[0], inputs[1], strides, padding, activation_bits,
weight_bits, pack_dtype, out_dtype, unipolar)
else:
raise ValueError("Data layout not supported.")
return [out]
@reg.register_schedule("nn.bitserial_conv2d")
def schedule_bitserial_conv2d(attrs, outs, target):
"""Schedule definition for bitserial conv2d."""
layout = attrs.data_layout
if layout == 'NCHW':
with target:
return topi.generic.schedule_bitserial_conv2d_nchw(outs)
elif layout == 'NHWC':
with target:
return topi.generic.schedule_bitserial_conv2d_nhwc(outs)
else:
raise ValueError("Data layout not supported.")
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
"""Legalize bitserial_conv2d op.
Parameters
----------
attrs : tvm.attrs.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# bitserial_dense
@reg.register_compute("nn.bitserial_dense")
def compute_bitserial_dense(attrs, inputs, out_type, target):
"""Compute definition of bitserial_dense"""
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [
topi.nn.bitserial_dense(
inputs[0],
inputs[1],
data_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar)
]
@reg.register_schedule("nn.bitserial_dense")
def schedule_bitserial_dense(attrs, outputs, target):
"""Schedule definition of bitserial_dense"""
with target:
return topi.generic.schedule_bitserial_dense(outputs)
reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
reg.register_pattern("nn.cross_entropy", OpPattern.OPAQUE)
@reg.register_compute("nn.cross_entropy")
def compute_cross_entropy(attrs, inputs, out_dtype, target):
x, y = inputs
return [-topi.sum(topi.log(x) * y) / x.shape[0]]
reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE)
@reg.register_compute("nn.cross_entropy_with_logits")
def compute_cross_entropy_with_logits(attrs, inputs, out_dtype, target):
x, y = inputs
return [-topi.sum(x * y) / x.shape[0]]
@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype, target):
block_size = attrs.block_size
layout = attrs.layout
mode = attrs.mode
return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)]
reg.register_schedule("nn.depth_to_space", schedule_injective)
reg.register_pattern("nn.depth_to_space", OpPattern.INJECTIVE)
@reg.register_compute("nn.space_to_depth")
def compute_space_to_depth(attrs, inputs, out_dtype, target):
block_size = attrs.block_size
layout = attrs.layout
return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)]
reg.register_schedule("nn.space_to_depth", schedule_injective)
reg.register_pattern("nn.space_to_depth", OpPattern.INJECTIVE)
# shape func
@script
def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn):
out = output_tensor((dshape.shape[0],), "int64")
ic_chunk = dshape[1]
height = dshape[2]
width = dshape[3]
ic_bn = dshape[4]
kheight = kshape[2]
kwidth = kshape[3]
dilated_kh = (kheight - 1) * dilation[0] + 1
dilated_kw = (kwidth - 1) * dilation[1] + 1
kflatten = int64(1)
for i in const_range(kshape.shape[0]):
kflatten *= kshape[i]
oc = kflatten // (kheight * kwidth * ic_chunk * ic_bn)
oc_chunk = oc // oc_bn
out_height = (height + 2 * padding[0] - dilated_kh) // strides[0] + 1
out_width = (width + 2 * padding[1] - dilated_kw) // strides[1] + 1
out[0] = dshape[0]
out[1] = oc_chunk
out[2] = out_height
out[3] = out_width
out[4] = int64(oc_bn)
return out
@reg.register_shape_func("nn.contrib_conv2d_NCHWc", False)
def conv2d_NCHWc_shape_func(attrs, inputs, _):
"""
Shape function for contrib_conv2d_NCHWc op.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_layout = attrs.out_layout
oc_bn = int(out_layout[4:-1])
return [_conv2d_NCHWc_shape_func(inputs[0], inputs[1],
convert(strides), convert(padding),
convert(dilation), convert(oc_bn))]
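# Worked example (illustrative; the shapes below are hypothetical): for NCHW8c data of shape
# (1, 8, 56, 56, 8) and a packed 3x3 kernel of shape (16, 8, 3, 3, 8, 8), i.e. 73728 elements,
# with strides (1, 1), padding (1, 1), dilation (1, 1) and oc_bn = 8, the shape function above
# computes oc = 73728 // (3 * 3 * 8 * 8) = 128, oc_chunk = 128 // 8 = 16 and
# out_height = out_width = (56 + 2 * 1 - 3) // 1 + 1 = 56, giving an output shape of (1, 16, 56, 56, 8).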
@script
def _pool2d_shape_func(data_shape, pool_size, strides,
padding, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
if i == height_axis:
out[i] = (data_shape[i] + padding[0] + padding[2] - pool_size[0]) // strides[0] + 1
elif i == width_axis:
out[i] = (data_shape[i] + padding[1] + padding[3] - pool_size[1]) // strides[1] + 1
else:
out[i] = data_shape[i]
return out
def pool2d_shape_func(attrs, inputs, _):
"""
Shape function for pool2d op.
"""
pool_size = get_const_tuple(attrs.pool_size)
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
layout = attrs.layout
height_axis = layout.index("H")
width_axis = layout.index("W")
if len(padding) == 1:
padding = [padding[0]] * 4
elif len(padding) == 2:
padding = [padding[0], padding[1], padding[0], padding[1]]
return [_pool2d_shape_func(inputs[0], convert(pool_size),
convert(strides), convert(padding),
convert(height_axis), convert(width_axis))]
reg.register_shape_func("nn.max_pool2d", False, pool2d_shape_func)
reg.register_shape_func("nn.avg_pool2d", False, pool2d_shape_func)
@script
def _global_pool2d_shape_func(data_shape, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
if i == height_axis or i == width_axis:
out[i] = int64(1)
else:
out[i] = data_shape[i]
return out
def global_pool2d_shape_func(attrs, inputs, _):
"""
Shape function for global pool2d op.
"""
layout = attrs.layout
height_axis = width_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
return [_global_pool2d_shape_func(inputs[0], convert(height_axis), convert(width_axis))]
reg.register_shape_func("nn.global_max_pool2d", False, global_pool2d_shape_func)
reg.register_shape_func("nn.global_avg_pool2d", False, global_pool2d_shape_func)
@script
def _batch_flatten_shape_func(data_shape):
out = output_tensor((2,), "int64")
out[0] = data_shape[0]
out[1] = int64(1)
for i in const_range(data_shape.shape[0] - 1):
out[1] *= data_shape[i + 1]
return out
@reg.register_shape_func("nn.batch_flatten", False)
def batch_flatten_shape_func(attrs, inputs, _):
"""
Shape function for batch_flatten op.
"""
return [_batch_flatten_shape_func(inputs[0])]
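# Worked example (illustrative): batch_flatten keeps the batch dimension and collapses the
# rest, so a hypothetical (8, 3, 224, 224) input yields 3 * 224 * 224 = 150528 and an output
# shape of (8, 150528).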
@script
def _dense_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
out[i] = data_shape[i]
out[out.shape[0] - 1] = weight_shape[0]
return out
@reg.register_shape_func("nn.dense", False)
def dense_shape_func(attrs, inputs, _):
"""
Shape function for dense op.
"""
ret = [_dense_shape_func(inputs[0], inputs[1])]
return ret
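# Worked example (illustrative): dense with data of shape (32, 100) and weight of shape
# (10, 100) keeps the leading dimensions of the data and replaces the last one with
# weight_shape[0], giving an output shape of (32, 10).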
@script
def _pad_shape_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = data_shape[i] + pad_width[i][0] + pad_width[i][1]
return out
@reg.register_shape_func("nn.pad", False)
def pad_shape_func(attrs, inputs, _):
"""
Shape function for pad op.
"""
pad_width = []
for pair in attrs.pad_width:
pad_width.append(get_const_tuple(pair))
return [_pad_shape_func(inputs[0], convert(pad_width))]
reg.register_shape_func("nn.bias_add", False, elemwise_shape_func)
reg.register_shape_func("nn.softmax", False, elemwise_shape_func)
reg.register_shape_func("nn.relu", False, elemwise_shape_func)
| 34.34779
| 102
| 0.70517
|
ea42b217ea8578dfe732cd41f143b41c75e53653
| 3,201
|
py
|
Python
|
setup.py
|
jeffnappi/crate-python
|
307742e2d954502886023fb648a3a9e87b3042bd
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jeffnappi/crate-python
|
307742e2d954502886023fb648a3a9e87b3042bd
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jeffnappi/crate-python
|
307742e2d954502886023fb648a3a9e87b3042bd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8; -*-
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from setuptools import setup, find_packages
import os
import re
requirements = [
'setuptools',
'urllib3',
'six'
]
def read(path):
return open(os.path.join(os.path.dirname(__file__), path)).read()
long_description = (
read('README.rst')
+ '\n' +
read('docs/client.txt')
+ '\n' +
read('docs/blobs.txt')
)
versionf_content = open("src/crate/client/__init__.py").read()
version_rex = r'^__version__ = [\'"]([^\'"]*)[\'"]$'
m = re.search(version_rex, versionf_content, re.M)
if m:
version = m.group(1)
else:
raise RuntimeError('Unable to find version string')
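# Illustrative sanity check (not part of the original setup script): version_rex
# matches a module-level assignment such as __version__ = "0.5.0" because the re.M
# flag lets ^ and $ anchor on individual lines. The value below is hypothetical.
assert re.search(version_rex, '__version__ = "0.5.0"', re.M).group(1) == "0.5.0"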
setup(
name='crate',
version=version,
url='https://github.com/crate/crate-python',
author='CRATE Technology GmbH',
author_email='office@crate.io',
package_dir={'': 'src'},
description='Crate Data Python client',
long_description=long_description,
platforms=['any'],
license='Apache License 2.0',
keywords='crate db api sqlalchemy',
packages=find_packages('src'),
namespace_packages=['crate'],
entry_points={
'sqlalchemy.dialects': [
'crate = crate.client.sqlalchemy:CrateDialect'
]
},
extras_require=dict(
test=['lovely.testlayers',
'mock>=1.0.1',
'zope.testing',
'zc.customdoctests>=1.0.1'],
sqlalchemy=['sqlalchemy>=0.8.2']
),
install_requires=requirements,
package_data={'': ['*.txt']},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Database'
],
)
| 32.333333
| 77
| 0.653233
|
e2035d7b90e819d5d850ac2a191baf0c260c3197
| 1,928
|
py
|
Python
|
cold_posterior_bnn/imdb/imdb_model.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
cold_posterior_bnn/imdb/imdb_model.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
cold_posterior_bnn/imdb/imdb_model.py
|
pedersor/google-research
|
6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras models for the IMDB task.
"""
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import MaxPooling1D
from tensorflow.keras.models import Sequential
NUM_WORDS = 20000
SEQUENCE_LENGTH = 100
EMBEDDING_SIZE = 128
CNNLSTM_CELL_SIZE = 70
# no dropout
def cnn_lstm_nd(pfac,
max_features=NUM_WORDS,
maxlen=SEQUENCE_LENGTH,
lstm_cell_size=CNNLSTM_CELL_SIZE,
embedding_size=EMBEDDING_SIZE):
"""CNN-LSTM model, modified from Keras example."""
# From github.com/keras-team/keras/blob/master/examples/imdb_cnn_lstm.py
filters = 64
kernel_size = 5
pool_size = 4
model = Sequential()
model.add(pfac(Embedding(max_features, embedding_size, input_length=maxlen,
name='embedding')))
model.add(pfac(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1,
name='conv')))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(pfac(LSTM(lstm_cell_size, name='lstm')))
model.add(pfac(Dense(2, name='dense')))
return model
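# Illustrative usage sketch (not part of the original module): `pfac` is expected to
# be a callable that wraps each layer (for example to attach priors); passing the
# identity function builds the plain Keras model. Runs only when executed directly.
if __name__ == '__main__':
  demo_model = cnn_lstm_nd(pfac=lambda layer: layer)
  demo_model.summary()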
| 33.241379
| 77
| 0.688797
|
9b12b529a1fd64ac38c9d157a180aabfd7a2ff84
| 3,446
|
py
|
Python
|
Chapter02/misc/feed_forward_iris.py
|
PacktPublishing/Neural-Network-Programming-with-TensorFlow
|
6ab03d8bfc8b23217968e7f71b656e3afc8bd7a0
|
[
"MIT"
] | 26
|
2017-11-17T18:56:16.000Z
|
2022-03-03T13:25:44.000Z
|
Chapter02/misc/feed_forward_iris.py
|
PacktPublishing/Neural-Network-Programming-with-TensorFlow
|
6ab03d8bfc8b23217968e7f71b656e3afc8bd7a0
|
[
"MIT"
] | 2
|
2021-08-25T14:50:10.000Z
|
2022-02-09T23:30:51.000Z
|
Chapter02/misc/feed_forward_iris.py
|
PacktPublishing/Neural-Network-Programming-with-TensorFlow
|
6ab03d8bfc8b23217968e7f71b656e3afc8bd7a0
|
[
"MIT"
] | 22
|
2017-11-16T05:16:38.000Z
|
2022-01-03T20:10:04.000Z
|
# Implementation of a simple MLP network with one hidden layer. Tested on the iris data set.
# Requires: numpy, sklearn>=0.18.1, tensorflow>=1.0
# NOTE: In order to make the code simple, we rewrite x * W_1 + b_1 = x' * W_1'
# where x' = [x | 1] and W_1' is the matrix W_1 appended with a new row with elements b_1's.
# Similarly, for h * W_2 + b_2
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
RANDOM_SEED = 42
tf.set_random_seed(RANDOM_SEED)
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape, stddev=0.1)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
y = tf.matmul(h, w_2) # The \varphi function
return y
def get_iris_data():
""" Read the iris data set and split them into training and test sets """
#iris = datasets.load_iris()
from numpy import genfromtxt
data = genfromtxt('iris.csv', delimiter=',')
#data = iris["data"]
#np.savetxt("foo.csv", data, delimiter=",")
#target = iris["target"]
#np.savetxt("target.csv", target, delimiter=",")
target = genfromtxt('target.csv', delimiter=',').astype(int)
# Prepend the column of 1s for bias
N, M = data.shape
all_X = np.ones((N, M + 1))
all_X[:, 1:] = data
# Convert into one-hot vectors
num_labels = len(np.unique(target))
all_Y = np.eye(num_labels)[target] # One liner trick!
return train_test_split(all_X, all_Y, test_size=0.33, random_state=RANDOM_SEED)
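# Illustrative sketch (not part of the original script): np.eye(num_labels)[target]
# builds one-hot rows by fancy-indexing an identity matrix. Hypothetical labels below.
assert np.array_equal(np.eye(3)[np.array([0, 2, 1])],
                      np.array([[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]))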
def main():
train_X, test_X, train_y, test_y = get_iris_data()
# Layer's sizes
x_size = train_X.shape[1] # Number of input nodes: 4 features and 1 bias
h_size = 256 # Number of hidden nodes
y_size = train_y.shape[1] # Number of outcomes (3 iris flowers)
# Symbols
X = tf.placeholder("float", shape=[None, x_size])
y = tf.placeholder("float", shape=[None, y_size])
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
print(yhat)
    predict = tf.argmax(yhat, axis=1)
# Backward propagation
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
updates = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
# Run SGD
sess = tf.Session()
    init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(100):
# Train with each example
for i in range(len(train_X)):
sess.run(updates, feed_dict={X: train_X[i: i + 1], y: train_y[i: i + 1]})
train_accuracy = np.mean(np.argmax(train_y, axis=1) ==
sess.run(predict, feed_dict={X: train_X, y: train_y}))
test_accuracy = np.mean(np.argmax(test_y, axis=1) ==
sess.run(predict, feed_dict={X: test_X, y: test_y}))
print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
% (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))
sess.close()
if __name__ == '__main__':
main()
| 34.46
| 111
| 0.641613
|
062496e47dc0557b9fb2825bb955bb13648caa29
| 3,130
|
py
|
Python
|
tests/test_error.py
|
vBlackOut/MerossIot
|
677377cb6bf1ea2206622e5f80b1529837057585
|
[
"MIT"
] | null | null | null |
tests/test_error.py
|
vBlackOut/MerossIot
|
677377cb6bf1ea2206622e5f80b1529837057585
|
[
"MIT"
] | null | null | null |
tests/test_error.py
|
vBlackOut/MerossIot
|
677377cb6bf1ea2206622e5f80b1529837057585
|
[
"MIT"
] | null | null | null |
import os
from uuid import uuid4
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from meross_iot.controller.device import BaseDevice
from meross_iot.controller.mixins.toggle import ToggleXMixin
from meross_iot.http_api import MerossHttpClient
from meross_iot.manager import MerossManager
from meross_iot.model.enums import OnlineStatus, Namespace
from meross_iot.model.exception import CommandTimeoutError
EMAIL = os.environ.get('MEROSS_EMAIL')
PASSWORD = os.environ.get('MEROSS_PASSWORD')
class TestError(AioHTTPTestCase):
async def get_application(self):
return web.Application()
async def setUpAsync(self):
self.meross_client = await MerossHttpClient.async_from_user_password(email=EMAIL, password=PASSWORD)
# Look for a device to be used for this test
self.meross_manager = MerossManager(http_client=self.meross_client)
await self.meross_manager.async_init()
devices = await self.meross_manager.async_device_discovery()
@unittest_run_loop
async def test_invalid_target_device(self):
async def send_command_to_unknown_device():
random_uuid = uuid4()
return await self.meross_manager.async_execute_cmd(destination_device_uuid=str(random_uuid), method='GET',
namespace=Namespace.SYSTEM_ALL, payload={})
with self.assertRaises(CommandTimeoutError):
await send_command_to_unknown_device()
@unittest_run_loop
async def test_invalid_namespace(self):
devs = self.meross_manager.find_devices(device_class=ToggleXMixin, online_status=OnlineStatus.ONLINE)
if len(devs) < 1:
self.skipTest("No available/online devices found to test. Skipping...")
dev = devs[0]
async def send_invalid_command_to_device(dev: BaseDevice):
res = await self.meross_manager.async_execute_cmd(destination_device_uuid=dev.uuid, method='GET',
namespace=Namespace.HUB_MTS100_MODE, payload={})
return res
with self.assertRaises(CommandTimeoutError):
await send_invalid_command_to_device(dev=dev)
@unittest_run_loop
async def test_invalid_payload(self):
devs = self.meross_manager.find_devices(device_class=ToggleXMixin, online_status=OnlineStatus.ONLINE)
if len(devs) < 1:
self.skipTest("No available/online devices found to test. Skipping...")
dev = devs[0]
async def send_invalid_command_to_device(dev: BaseDevice):
return await self.meross_manager.async_execute_cmd(destination_device_uuid=dev.uuid, method='SET',
namespace=Namespace.HUB_MTS100_MODE,
payload={'temperature': 'bar'})
with self.assertRaises(CommandTimeoutError):
await send_invalid_command_to_device(dev=dev)
async def tearDownAsync(self):
await self.meross_client.async_logout()
| 43.472222
| 118
| 0.685623
|
40abb254186618d8bf03a6cbbd289b07d77ce6e7
| 412
|
py
|
Python
|
iccas/checks.py
|
janLuke/iccas-python
|
cac4f07c13afea312af66a7ae0b7cfcbac3d08ae
|
[
"MIT"
] | null | null | null |
iccas/checks.py
|
janLuke/iccas-python
|
cac4f07c13afea312af66a7ae0b7cfcbac3d08ae
|
[
"MIT"
] | 1
|
2020-12-01T22:25:23.000Z
|
2020-12-01T22:25:23.000Z
|
iccas/checks.py
|
janLuke/iccas-python
|
cac4f07c13afea312af66a7ae0b7cfcbac3d08ae
|
[
"MIT"
] | null | null | null |
"""
Sanity checks.
"""
def is_non_decreasing(df):
deltas = df.drop(columns="unknown", level=1).diff(1).dropna()
return (deltas >= 0).all(axis=None)
def totals_not_less_than_sum_of_sexes(data, variable):
assert variable in {"cases", "deaths"}
total = data[variable]
sum_of_sexes = data[f"male_{variable}"] + data[f"female_{variable}"]
return (total - sum_of_sexes >= 0).all(axis=None)
| 25.75
| 72
| 0.674757
|
2bf3637abaf908a98ffee6548e311693986904ae
| 1,419
|
py
|
Python
|
examples/lambda_singles_codegen.py
|
maxscheurer/pdaggerq
|
e9fef3466e0d0170afc3094ab79e603200e78dfb
|
[
"Apache-2.0"
] | 37
|
2020-09-17T19:29:18.000Z
|
2022-03-03T16:29:16.000Z
|
examples/lambda_singles_codegen.py
|
maxscheurer/pdaggerq
|
e9fef3466e0d0170afc3094ab79e603200e78dfb
|
[
"Apache-2.0"
] | 7
|
2021-02-28T19:22:12.000Z
|
2022-02-22T15:17:47.000Z
|
examples/lambda_singles_codegen.py
|
maxscheurer/pdaggerq
|
e9fef3466e0d0170afc3094ab79e603200e78dfb
|
[
"Apache-2.0"
] | 6
|
2021-02-16T22:34:29.000Z
|
2021-12-04T19:37:23.000Z
|
# ccsd lambda equations
# L = <0| (1+L) e(-T) H e(T) |0>
# dL/dtu = <0| e(-T) H e(T) |u> + <0| L e(-T) H e(T) |u> - <0| L tu e(-T) H e(T) |0>
import pdaggerq
from pdaggerq.parser import contracted_strings_to_tensor_terms
def main():
pq = pdaggerq.pq_helper("fermi")
pq.set_print_level(0)
print('')
print(' 0 = <0| e(-T) H e*m e(T)|0> + <0| L e(-T) [H, e*m] e(T)|0>')
print('')
# <0| e(-T) H e*m e(T)|0>
pq.set_left_operators(['1'])
pq.set_right_operators(['1'])
pq.add_st_operator(1.0,['f','e1(e,m)'],['t1','t2'])
pq.add_st_operator(1.0,['v','e1(e,m)'],['t1','t2'])
# <0| L e(-T) [H,e*m] e(T)|0>
pq.set_left_operators(['l1','l2'])
pq.add_st_operator( 1.0,['f','e1(e,m)'],['t1','t2'])
pq.add_st_operator( 1.0,['v','e1(e,m)'],['t1','t2'])
pq.add_st_operator(-1.0,['e1(e,m)','f'],['t1','t2'])
pq.add_st_operator(-1.0,['e1(e,m)','v'],['t1','t2'])
pq.simplify()
# grab list of fully-contracted strings, then print
singles_residual_terms = pq.fully_contracted_strings()
singles_residual_terms = contracted_strings_to_tensor_terms(singles_residual_terms)
for my_term in singles_residual_terms:
print("#\t", my_term)
print(my_term.einsum_string(update_val='lambda_one',
output_variables=('m', 'e')))
print()
pq.clear()
if __name__ == "__main__":
main()
| 26.773585
| 87
| 0.55673
|
88b6e7eb93c5b4f12dc362e47c72aec83bbf4090
| 1,474
|
py
|
Python
|
sequoia/spammers/continuous_spammer.py
|
CU-BISON-LAB/sequoia
|
c9f5c0cd059909efc6ac9d26fac624b150dbd7c8
|
[
"MIT"
] | 7
|
2020-12-11T02:01:56.000Z
|
2022-02-24T13:19:17.000Z
|
sequoia/spammers/continuous_spammer.py
|
CU-BISON-LAB/sequoia
|
c9f5c0cd059909efc6ac9d26fac624b150dbd7c8
|
[
"MIT"
] | 1
|
2021-10-18T14:26:40.000Z
|
2021-10-19T13:16:16.000Z
|
sequoia/spammers/continuous_spammer.py
|
CU-BISON-LAB/sequoia
|
c9f5c0cd059909efc6ac9d26fac624b150dbd7c8
|
[
"MIT"
] | 4
|
2021-04-12T21:17:39.000Z
|
2022-03-10T17:44:31.000Z
|
# continuous_spammer.py
# Invokes bursts of HTTP-triggered lambda functions at regular intervals.
from threading import Thread
import requests
import time
# Serverless functions
url1 = "lambda1.com"
url2 = "lambda2.com"
# Number of functions to invoke in each iteration
burst_size = 1000
# Number of iterations to run the spammer
no_of_iterations = 6
# Number of seconds between each iteration
seconds_between_iters = 15
# Iteration number to start invoking url2
start_iteration_url2 = 501
# Entry point of each worker thread.
# Invoke a serverless function at the specified url.
def invoke_function(function_url, uid):
PARAMS = {"id": uid}
response = requests.get(url = function_url, params = PARAMS, timeout=300)
print(response, response.content)
uid = 0
# Perform one burst iteration; the sleep between iterations happens in the main loop below.
def do_burst(iter_number):
for i in range(burst_size):
global uid
t1 = Thread(target=invoke_function, args=(url1,uid))
t1.start()
if iter_number >= start_iteration_url2:
t2 = Thread(target=invoke_function, args=(url2,uid))
t2.start()
uid = uid + 1
# Perform all iterations and sleep between each of them
for i in range(no_of_iterations):
print('iteration no:', i, 'with burst_size', burst_size)
start = time.time()
do_burst(i)
elapsed = time.time() - start
if seconds_between_iters > elapsed:
time.sleep(seconds_between_iters-elapsed)
| 29.48
| 77
| 0.719132
|
1e5cfc3741bfd74c5eb75a311d2223a8565f3f12
| 5,743
|
py
|
Python
|
python/line.py
|
dstndstn/wcs2kml
|
840667fe142bfe5c34f61fc2cd5fbfecfa27e87e
|
[
"BSD-3-Clause"
] | null | null | null |
python/line.py
|
dstndstn/wcs2kml
|
840667fe142bfe5c34f61fc2cd5fbfecfa27e87e
|
[
"BSD-3-Clause"
] | 1
|
2020-03-08T04:42:30.000Z
|
2020-03-08T04:42:30.000Z
|
python/line.py
|
dstndstn/wcs2kml
|
840667fe142bfe5c34f61fc2cd5fbfecfa27e87e
|
[
"BSD-3-Clause"
] | 1
|
2020-02-29T19:56:15.000Z
|
2020-02-29T19:56:15.000Z
|
#!/usr/bin/env python
# Class for lines
# Copyright (c) 2005, 2006, 2007, Jeremy Brewer
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Changelog:
#
# 2/7/06 Updated documentation.
"""
Line class module
The Line class is a simple class for holding the slope and intercept of a
line. Line objects are callable -- when called, they evaluate at the given
x value, e.g. y = line(x) gives the value of line at x.
Example usage:
l = line.Line(2.0, 3.0)
x1, x2, x3 = 3.14, 0.0, 1.0
print("The line is %s" % l)
print("f(%f) = %f" % (x1, l(x1)))
print("f(%f) = %f" % (x2, l(x2)))
print("f(%f) = %f" % (x3, l(x3)))
lp = l.perpToAtX(0.0)
print("Line perpendicular to original at x = 0 is %s" % lp)
lp = l.perpToAtY(3.0)
print("Line perpendicular to original at y = 3 is %s" % lp)
flip = l.flipXY()
print("Line with flipped x, y is %s" % flip)
fit = line.LinearFit(2.0, 3.0, 0.987)
print("Linear fit is %s" % fit)
"""
__author__ = "Jeremy Brewer (jeremy.d.brewer@gmail.com)"
__copyright__ = "Copyright 2005, 2006, 2007 Jeremy Brewer"
__license__ = "BSD"
__version__ = "1.0"
class Line(object):
"""Class for describing lines"""
def __init__(self, slope=0.0, intercept=0.0):
"""
Initializes a line to have the given slope and intercept.
Input: slope -- slope of the line
intercept -- intercept of the line
"""
try:
self.slope = float(slope)
except ValueError:
raise TypeError("invalid slope value '%s'" % slope)
try:
self.intercept = float(intercept)
except ValueError:
raise TypeError("invalid intercept value '%s'" % intercept)
def __str__(self):
"""Returns a string representation of the line"""
return "y = %f * x + %f" % (self.slope, self.intercept)
def __call__(self, x):
"""Evaluates a line at a given position x"""
assert isinstance(x, float)
return self.slope * x + self.intercept
def perpToAtX(self, x):
"""Returns a line perpendicular to this line at the given x position"""
assert isinstance(x, float)
perpSlope = -1.0 / self.slope
perpIntercept = x * (self.slope + 1.0 / self.slope) + self.intercept
return Line(perpSlope, perpIntercept)
def perpToAtY(self, y):
"""Returns a line perpendicular to this line at the given x position"""
assert isinstance(y, float)
x = (y - self.intercept) / self.slope
return self.perpToAtX(x)
def flipXY(self):
"""Creates a line where x and y have been flipped"""
if self.slope == 0.0:
raise ZeroDivisionError("cannot flip line with slope = 0")
newSlope = 1.0 / self.slope
newIntercept = -self.intercept / self.slope
return Line(newSlope, newIntercept)
class LinearFit(Line):
"""Class for describing linear fits"""
def __init__(self, slope=0.0, intercept=0.0, r2=0.0):
"""
Initializes a linear fit to have the given slope, intercept, and
correlation coefficient.
Input: slope -- slope of the line
intercept -- intercept of the line
r2 -- correlation coefficient of the line
"""
Line.__init__(self, slope, intercept)
try:
self.r2 = float(r2)
except ValueError:
raise TypeError("invalid r2 value '%s'" % r2)
def __str__(self):
"""Returns a string representation of the linear fit"""
return "y = %f * x + %f, r^2 = %f" % (self.slope, self.intercept,
self.r2)
# testing code
if __name__ == "__main__":
    l = Line(2.0, 3.0)
    x1, x2, x3 = 3.14, 0.0, 1.0
    print("The line is %s" % l)
    print("f(%f) = %f" % (x1, l(x1)))
    print("f(%f) = %f" % (x2, l(x2)))
    print("f(%f) = %f" % (x3, l(x3)))
    lp = l.perpToAtX(0.0)
    print("Line perpendicular to original at x = 0 is %s" % lp)
    lp = l.perpToAtY(3.0)
    print("Line perpendicular to original at y = 3 is %s" % lp)
    flip = l.flipXY()
    print("Line with flipped x, y is %s" % flip)
    fit = LinearFit(2.0, 3.0, 0.987)
    print("Linear fit is %s" % fit)
| 32.264045
| 80
| 0.623193
|
dee6648b36ecdf6c222a0b0854ece2b147130f20
| 22,454
|
py
|
Python
|
sympy/assumptions/handlers/sets.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 2
|
2020-07-27T16:36:27.000Z
|
2020-12-29T22:28:37.000Z
|
sympy/assumptions/handlers/sets.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 2
|
2020-08-18T15:21:59.000Z
|
2020-08-18T19:35:29.000Z
|
sympy/assumptions/handlers/sets.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 3
|
2019-05-18T21:32:31.000Z
|
2019-07-26T11:05:46.000Z
|
"""
Handlers for predicates related to set membership: integer, rational, etc.
"""
from sympy.assumptions import Q, ask
from sympy.assumptions.handlers import CommonHandler, test_closed_group
from sympy.core.numbers import pi
from sympy.core.logic import fuzzy_bool
from sympy.functions.elementary.exponential import exp, log
from sympy import I, Eq, conjugate, MatrixBase
class AskIntegerHandler(CommonHandler):
"""
Handler for Q.integer
    Test that an expression belongs to the ring of integers
"""
@staticmethod
def Expr(expr, assumptions):
return expr.is_integer
@staticmethod
def _number(expr, assumptions):
# helper method
try:
i = int(expr.round())
if not (expr - i).equals(0):
raise TypeError
return True
except TypeError:
return False
@staticmethod
def Add(expr, assumptions):
"""
Integer + Integer -> Integer
Integer + !Integer -> !Integer
!Integer + !Integer -> ?
"""
if expr.is_number:
return AskIntegerHandler._number(expr, assumptions)
return test_closed_group(expr, assumptions, Q.integer)
@staticmethod
def Mul(expr, assumptions):
"""
Integer*Integer -> Integer
Integer*Irrational -> !Integer
Odd/Even -> !Integer
Integer*Rational -> ?
"""
if expr.is_number:
return AskIntegerHandler._number(expr, assumptions)
_output = True
for arg in expr.args:
if not ask(Q.integer(arg), assumptions):
if arg.is_Rational:
if arg.q == 2:
return ask(Q.even(2*expr), assumptions)
if ~(arg.q & 1):
return None
elif ask(Q.irrational(arg), assumptions):
if _output:
_output = False
else:
return
else:
return
return _output
Pow = Add
int, Integer = [staticmethod(CommonHandler.AlwaysTrue)]*2
Pi, Exp1, GoldenRatio, TribonacciConstant, Infinity, NegativeInfinity, ImaginaryUnit = \
[staticmethod(CommonHandler.AlwaysFalse)]*7
@staticmethod
def Rational(expr, assumptions):
# rationals with denominator one get
# evaluated to Integers
return False
@staticmethod
def Abs(expr, assumptions):
return ask(Q.integer(expr.args[0]), assumptions)
@staticmethod
def MatrixElement(expr, assumptions):
return ask(Q.integer_elements(expr.args[0]), assumptions)
Determinant = Trace = MatrixElement
class AskRationalHandler(CommonHandler):
"""
Handler for Q.rational
Test that an expression belongs to the field of rational numbers
"""
@staticmethod
def Expr(expr, assumptions):
return expr.is_rational
@staticmethod
def Add(expr, assumptions):
"""
Rational + Rational -> Rational
Rational + !Rational -> !Rational
!Rational + !Rational -> ?
"""
if expr.is_number:
if expr.as_real_imag()[1]:
return False
return test_closed_group(expr, assumptions, Q.rational)
Mul = Add
@staticmethod
def Pow(expr, assumptions):
"""
Rational ** Integer -> Rational
Irrational ** Rational -> Irrational
Rational ** Irrational -> ?
"""
if ask(Q.integer(expr.exp), assumptions):
return ask(Q.rational(expr.base), assumptions)
elif ask(Q.rational(expr.exp), assumptions):
if ask(Q.prime(expr.base), assumptions):
return False
Rational = staticmethod(CommonHandler.AlwaysTrue)
Float = staticmethod(CommonHandler.AlwaysNone)
ImaginaryUnit, Infinity, NegativeInfinity, Pi, Exp1, GoldenRatio, TribonacciConstant = \
[staticmethod(CommonHandler.AlwaysFalse)]*7
@staticmethod
def exp(expr, assumptions):
x = expr.args[0]
if ask(Q.rational(x), assumptions):
return ask(~Q.nonzero(x), assumptions)
@staticmethod
def cot(expr, assumptions):
x = expr.args[0]
if ask(Q.rational(x), assumptions):
return False
@staticmethod
def log(expr, assumptions):
x = expr.args[0]
if ask(Q.rational(x), assumptions):
return ask(~Q.nonzero(x - 1), assumptions)
sin, cos, tan, asin, atan = [exp]*5
acos, acot = log, cot
class AskIrrationalHandler(CommonHandler):
@staticmethod
def Expr(expr, assumptions):
return expr.is_irrational
@staticmethod
def Basic(expr, assumptions):
_real = ask(Q.real(expr), assumptions)
if _real:
_rational = ask(Q.rational(expr), assumptions)
if _rational is None:
return None
return not _rational
else:
return _real
class AskRealHandler(CommonHandler):
"""
Handler for Q.real
Test that an expression belongs to the field of real numbers
"""
@staticmethod
def Expr(expr, assumptions):
return expr.is_real
@staticmethod
def _number(expr, assumptions):
# let as_real_imag() work first since the expression may
# be simpler to evaluate
i = expr.as_real_imag()[1].evalf(2)
if i._prec != 1:
return not i
# allow None to be returned if we couldn't show for sure
# that i was 0
@staticmethod
def Add(expr, assumptions):
"""
Real + Real -> Real
Real + (Complex & !Real) -> !Real
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
return test_closed_group(expr, assumptions, Q.real)
@staticmethod
def Mul(expr, assumptions):
"""
Real*Real -> Real
Real*Imaginary -> !Real
Imaginary*Imaginary -> Real
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
result = True
for arg in expr.args:
if ask(Q.real(arg), assumptions):
pass
elif ask(Q.imaginary(arg), assumptions):
result = result ^ True
else:
break
else:
return result
@staticmethod
def Pow(expr, assumptions):
"""
Real**Integer -> Real
Positive**Real -> Real
Real**(Integer/Even) -> Real if base is nonnegative
Real**(Integer/Odd) -> Real
Imaginary**(Integer/Even) -> Real
Imaginary**(Integer/Odd) -> not Real
Imaginary**Real -> ? since Real could be 0 (giving real) or 1 (giving imaginary)
b**Imaginary -> Real if log(b) is imaginary and b != 0 and exponent != integer multiple of I*pi/log(b)
Real**Real -> ? e.g. sqrt(-1) is imaginary and sqrt(2) is not
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
if expr.base.func == exp:
if ask(Q.imaginary(expr.base.args[0]), assumptions):
if ask(Q.imaginary(expr.exp), assumptions):
return True
# If the i = (exp's arg)/(I*pi) is an integer or half-integer
# multiple of I*pi then 2*i will be an integer. In addition,
# exp(i*I*pi) = (-1)**i so the overall realness of the expr
# can be determined by replacing exp(i*I*pi) with (-1)**i.
i = expr.base.args[0]/I/pi
if ask(Q.integer(2*i), assumptions):
return ask(Q.real(((-1)**i)**expr.exp), assumptions)
return
if ask(Q.imaginary(expr.base), assumptions):
if ask(Q.integer(expr.exp), assumptions):
odd = ask(Q.odd(expr.exp), assumptions)
if odd is not None:
return not odd
return
if ask(Q.imaginary(expr.exp), assumptions):
imlog = ask(Q.imaginary(log(expr.base)), assumptions)
if imlog is not None:
# I**i -> real, log(I) is imag;
# (2*I)**i -> complex, log(2*I) is not imag
return imlog
if ask(Q.real(expr.base), assumptions):
if ask(Q.real(expr.exp), assumptions):
if expr.exp.is_Rational and \
ask(Q.even(expr.exp.q), assumptions):
return ask(Q.positive(expr.base), assumptions)
elif ask(Q.integer(expr.exp), assumptions):
return True
elif ask(Q.positive(expr.base), assumptions):
return True
elif ask(Q.negative(expr.base), assumptions):
return False
Rational, Float, Pi, Exp1, GoldenRatio, TribonacciConstant, Abs, re, im = \
[staticmethod(CommonHandler.AlwaysTrue)]*9
ImaginaryUnit, Infinity, NegativeInfinity = \
[staticmethod(CommonHandler.AlwaysFalse)]*3
@staticmethod
def sin(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
return True
cos = sin
@staticmethod
def exp(expr, assumptions):
return ask(Q.integer(expr.args[0]/I/pi) | Q.real(expr.args[0]), assumptions)
@staticmethod
def log(expr, assumptions):
return ask(Q.positive(expr.args[0]), assumptions)
@staticmethod
def MatrixElement(expr, assumptions):
return ask(Q.real_elements(expr.args[0]), assumptions)
Determinant = Trace = MatrixElement
class AskExtendedRealHandler(AskRealHandler):
"""
Handler for Q.extended_real
Test that an expression belongs to the field of extended real numbers,
that is real numbers union {Infinity, -Infinity}
"""
@staticmethod
def Add(expr, assumptions):
return test_closed_group(expr, assumptions, Q.extended_real)
Mul, Pow = [Add]*2
Infinity, NegativeInfinity = [staticmethod(CommonHandler.AlwaysTrue)]*2
class AskHermitianHandler(AskRealHandler):
"""
Handler for Q.hermitian
Test that an expression belongs to the field of Hermitian operators
"""
@staticmethod
def Expr(expr, assumptions):
if isinstance(expr, MatrixBase):
return None
return AskRealHandler.Expr(expr, assumptions)
@staticmethod
def Add(expr, assumptions):
"""
Hermitian + Hermitian -> Hermitian
Hermitian + !Hermitian -> !Hermitian
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
return test_closed_group(expr, assumptions, Q.hermitian)
@staticmethod
def Mul(expr, assumptions):
"""
        As long as there is at most one noncommutative term:
Hermitian*Hermitian -> Hermitian
Hermitian*Antihermitian -> !Hermitian
Antihermitian*Antihermitian -> Hermitian
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
nccount = 0
result = True
for arg in expr.args:
if ask(Q.antihermitian(arg), assumptions):
result = result ^ True
elif not ask(Q.hermitian(arg), assumptions):
break
if ask(~Q.commutative(arg), assumptions):
nccount += 1
if nccount > 1:
break
else:
return result
@staticmethod
def Pow(expr, assumptions):
"""
Hermitian**Integer -> Hermitian
"""
if expr.is_number:
return AskRealHandler._number(expr, assumptions)
if ask(Q.hermitian(expr.base), assumptions):
if ask(Q.integer(expr.exp), assumptions):
return True
@staticmethod
def sin(expr, assumptions):
if ask(Q.hermitian(expr.args[0]), assumptions):
return True
cos, exp = [sin]*2
@staticmethod
def MatrixBase(mat, assumptions):
rows, cols = mat.shape
ret_val = True
for i in range(rows):
for j in range(i, cols):
cond = fuzzy_bool(Eq(mat[i, j], conjugate(mat[j, i])))
                if cond is None:
                    ret_val = None
                if cond is False:
                    return False
return ret_val
class AskComplexHandler(CommonHandler):
"""
Handler for Q.complex
Test that an expression belongs to the field of complex numbers
"""
@staticmethod
def Expr(expr, assumptions):
return expr.is_complex
@staticmethod
def Add(expr, assumptions):
return test_closed_group(expr, assumptions, Q.complex)
Mul, Pow = [Add]*2
Number, sin, cos, log, exp, re, im, NumberSymbol, Abs, ImaginaryUnit = \
[staticmethod(CommonHandler.AlwaysTrue)]*10 # they are all complex functions or expressions
Infinity, NegativeInfinity = [staticmethod(CommonHandler.AlwaysFalse)]*2
@staticmethod
def MatrixElement(expr, assumptions):
return ask(Q.complex_elements(expr.args[0]), assumptions)
Determinant = Trace = MatrixElement
class AskImaginaryHandler(CommonHandler):
"""
Handler for Q.imaginary
Test that an expression belongs to the field of imaginary numbers,
that is, numbers in the form x*I, where x is real
"""
@staticmethod
def Expr(expr, assumptions):
return expr.is_imaginary
@staticmethod
def _number(expr, assumptions):
# let as_real_imag() work first since the expression may
# be simpler to evaluate
r = expr.as_real_imag()[0].evalf(2)
if r._prec != 1:
return not r
# allow None to be returned if we couldn't show for sure
# that r was 0
@staticmethod
def Add(expr, assumptions):
"""
Imaginary + Imaginary -> Imaginary
Imaginary + Complex -> ?
Imaginary + Real -> !Imaginary
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
reals = 0
for arg in expr.args:
if ask(Q.imaginary(arg), assumptions):
pass
elif ask(Q.real(arg), assumptions):
reals += 1
else:
break
else:
if reals == 0:
return True
if reals == 1 or (len(expr.args) == reals):
# two reals could sum 0 thus giving an imaginary
return False
@staticmethod
def Mul(expr, assumptions):
"""
Real*Imaginary -> Imaginary
Imaginary*Imaginary -> Real
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
result = False
reals = 0
for arg in expr.args:
if ask(Q.imaginary(arg), assumptions):
result = result ^ True
elif not ask(Q.real(arg), assumptions):
break
else:
if reals == len(expr.args):
return False
return result
@staticmethod
def Pow(expr, assumptions):
"""
Imaginary**Odd -> Imaginary
Imaginary**Even -> Real
b**Imaginary -> !Imaginary if exponent is an integer multiple of I*pi/log(b)
Imaginary**Real -> ?
Positive**Real -> Real
Negative**Integer -> Real
Negative**(Integer/2) -> Imaginary
Negative**Real -> not Imaginary if exponent is not Rational
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
if expr.base.func == exp:
if ask(Q.imaginary(expr.base.args[0]), assumptions):
if ask(Q.imaginary(expr.exp), assumptions):
return False
i = expr.base.args[0]/I/pi
if ask(Q.integer(2*i), assumptions):
return ask(Q.imaginary(((-1)**i)**expr.exp), assumptions)
if ask(Q.imaginary(expr.base), assumptions):
if ask(Q.integer(expr.exp), assumptions):
odd = ask(Q.odd(expr.exp), assumptions)
if odd is not None:
return odd
return
if ask(Q.imaginary(expr.exp), assumptions):
imlog = ask(Q.imaginary(log(expr.base)), assumptions)
if imlog is not None:
return False # I**i -> real; (2*I)**i -> complex ==> not imaginary
if ask(Q.real(expr.base) & Q.real(expr.exp), assumptions):
if ask(Q.positive(expr.base), assumptions):
return False
else:
rat = ask(Q.rational(expr.exp), assumptions)
if not rat:
return rat
if ask(Q.integer(expr.exp), assumptions):
return False
else:
half = ask(Q.integer(2*expr.exp), assumptions)
if half:
return ask(Q.negative(expr.base), assumptions)
return half
@staticmethod
def log(expr, assumptions):
if ask(Q.real(expr.args[0]), assumptions):
if ask(Q.positive(expr.args[0]), assumptions):
return False
return
# XXX it should be enough to do
# return ask(Q.nonpositive(expr.args[0]), assumptions)
# but ask(Q.nonpositive(exp(x)), Q.imaginary(x)) -> None;
# it should return True since exp(x) will be either 0 or complex
if expr.args[0].func == exp:
if expr.args[0].args[0] in [I, -I]:
return True
im = ask(Q.imaginary(expr.args[0]), assumptions)
if im is False:
return False
@staticmethod
def exp(expr, assumptions):
a = expr.args[0]/I/pi
return ask(Q.integer(2*a) & ~Q.integer(a), assumptions)
@staticmethod
def Number(expr, assumptions):
return not (expr.as_real_imag()[1] == 0)
NumberSymbol = Number
ImaginaryUnit = staticmethod(CommonHandler.AlwaysTrue)
class AskAntiHermitianHandler(AskImaginaryHandler):
"""
Handler for Q.antihermitian
Test that an expression belongs to the field of anti-Hermitian operators,
that is, operators in the form x*I, where x is Hermitian
"""
@staticmethod
def Expr(expr, assumptions):
if isinstance(expr, MatrixBase):
return None
return AskImaginaryHandler.Expr(expr, assumptions)
@staticmethod
def Add(expr, assumptions):
"""
Antihermitian + Antihermitian -> Antihermitian
Antihermitian + !Antihermitian -> !Antihermitian
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
return test_closed_group(expr, assumptions, Q.antihermitian)
@staticmethod
def Mul(expr, assumptions):
"""
        As long as there is at most one noncommutative term:
Hermitian*Hermitian -> !Antihermitian
Hermitian*Antihermitian -> Antihermitian
Antihermitian*Antihermitian -> !Antihermitian
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
nccount = 0
result = False
for arg in expr.args:
if ask(Q.antihermitian(arg), assumptions):
result = result ^ True
elif not ask(Q.hermitian(arg), assumptions):
break
if ask(~Q.commutative(arg), assumptions):
nccount += 1
if nccount > 1:
break
else:
return result
@staticmethod
def Pow(expr, assumptions):
"""
Hermitian**Integer -> !Antihermitian
Antihermitian**Even -> !Antihermitian
Antihermitian**Odd -> Antihermitian
"""
if expr.is_number:
return AskImaginaryHandler._number(expr, assumptions)
if ask(Q.hermitian(expr.base), assumptions):
if ask(Q.integer(expr.exp), assumptions):
return False
elif ask(Q.antihermitian(expr.base), assumptions):
if ask(Q.even(expr.exp), assumptions):
return False
elif ask(Q.odd(expr.exp), assumptions):
return True
@staticmethod
def MatrixBase(mat, assumptions):
rows, cols = mat.shape
ret_val = True
for i in range(rows):
for j in range(i, cols):
cond = fuzzy_bool(Eq(mat[i, j], -conjugate(mat[j, i])))
                if cond is None:
                    ret_val = None
                if cond is False:
                    return False
return ret_val
class AskAlgebraicHandler(CommonHandler):
"""Handler for Q.algebraic key. """
@staticmethod
def Add(expr, assumptions):
return test_closed_group(expr, assumptions, Q.algebraic)
@staticmethod
def Mul(expr, assumptions):
return test_closed_group(expr, assumptions, Q.algebraic)
@staticmethod
def Pow(expr, assumptions):
return expr.exp.is_Rational and ask(
Q.algebraic(expr.base), assumptions)
@staticmethod
def Rational(expr, assumptions):
return expr.q != 0
Float, GoldenRatio, TribonacciConstant, ImaginaryUnit, AlgebraicNumber = \
[staticmethod(CommonHandler.AlwaysTrue)]*5
Infinity, NegativeInfinity, ComplexInfinity, Pi, Exp1 = \
[staticmethod(CommonHandler.AlwaysFalse)]*5
@staticmethod
def exp(expr, assumptions):
x = expr.args[0]
if ask(Q.algebraic(x), assumptions):
return ask(~Q.nonzero(x), assumptions)
@staticmethod
def cot(expr, assumptions):
x = expr.args[0]
if ask(Q.algebraic(x), assumptions):
return False
@staticmethod
def log(expr, assumptions):
x = expr.args[0]
if ask(Q.algebraic(x), assumptions):
return ask(~Q.nonzero(x - 1), assumptions)
sin, cos, tan, asin, atan = [exp]*5
acos, acot = log, cot
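# Illustrative usage sketch (not part of the original module): the queries below
# exercise a few of the closure rules documented in the handlers above. They only
# run when this file is executed directly.
if __name__ == "__main__":
    from sympy.abc import x, y
    print(ask(Q.integer(x + y), Q.integer(x) & Q.integer(y)))  # True: Integer + Integer
    print(ask(Q.real(x*y), Q.imaginary(x) & Q.imaginary(y)))   # True: Imaginary*Imaginary
    print(ask(Q.rational(pi)))                                 # False: pi is irrational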
| 31.580872
| 124
| 0.568139
|
d0e563e69d8d1623662931af3065a7aabb8d58e8
| 102
|
py
|
Python
|
mmfparse/__init__.py
|
michaelgfalk/mmf-parser
|
7dcaddc1a799d0445fb31856db8ec4144c2932a0
|
[
"Unlicense"
] | null | null | null |
mmfparse/__init__.py
|
michaelgfalk/mmf-parser
|
7dcaddc1a799d0445fb31856db8ec4144c2932a0
|
[
"Unlicense"
] | null | null | null |
mmfparse/__init__.py
|
michaelgfalk/mmf-parser
|
7dcaddc1a799d0445fb31856db8ec4144c2932a0
|
[
"Unlicense"
] | 1
|
2019-08-14T06:14:36.000Z
|
2019-08-14T06:14:36.000Z
|
name = "mmfparse"
# Make the key classes available from top level
from mmfparse.core import MMFParser
| 25.5
| 47
| 0.794118
|
cc5cad949d6dcfddb8a2d95c296cbbf00ec95074
| 7,041
|
py
|
Python
|
pepper/modules/python/models/predict_distributed_gpu.py
|
Samteymoori/pepper
|
734d226de47a855952e3b58145c1fcfbe221d3b4
|
[
"MIT"
] | 155
|
2019-11-08T09:55:24.000Z
|
2022-03-30T08:38:34.000Z
|
pepper/modules/python/models/predict_distributed_gpu.py
|
Samteymoori/pepper
|
734d226de47a855952e3b58145c1fcfbe221d3b4
|
[
"MIT"
] | 131
|
2020-04-06T05:02:42.000Z
|
2022-03-31T07:33:45.000Z
|
pepper/modules/python/models/predict_distributed_gpu.py
|
Samteymoori/pepper
|
734d226de47a855952e3b58145c1fcfbe221d3b4
|
[
"MIT"
] | 24
|
2019-07-09T18:49:58.000Z
|
2022-02-21T09:09:38.000Z
|
import sys
import os
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from pepper.modules.python.models.dataloader_predict import SequenceDataset
from tqdm import tqdm
from pepper.modules.python.models.ModelHander import ModelHandler
from pepper.modules.python.Options import ImageSizeOptions, TrainOptions
from pepper.modules.python.DataStorePredict import DataStore
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
def predict(input_filepath, file_chunks, output_filepath, model_path, batch_size, num_workers, rank, device_id):
transducer_model, hidden_size, gru_layers, prev_ite = \
ModelHandler.load_simple_model_for_training(model_path,
input_channels=ImageSizeOptions.IMAGE_CHANNELS,
image_features=ImageSizeOptions.IMAGE_HEIGHT,
seq_len=ImageSizeOptions.SEQ_LENGTH,
num_classes=ImageSizeOptions.TOTAL_LABELS)
    transducer_model = transducer_model.eval()
# create output file
output_filename = output_filepath + "pepper_prediction_" + str(device_id) + ".hdf"
prediction_data_file = DataStore(output_filename, mode='w')
# data loader
input_data = SequenceDataset(input_filepath, file_chunks)
data_loader = DataLoader(input_data,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers)
torch.cuda.set_device(device_id)
transducer_model.to(device_id)
transducer_model.eval()
transducer_model = DistributedDataParallel(transducer_model, device_ids=[device_id])
if rank == 0:
progress_bar = tqdm(
total=len(data_loader),
ncols=100,
leave=False,
position=rank,
desc="GPU #" + str(device_id),
)
with torch.no_grad():
for contig, contig_start, contig_end, chunk_id, images, position, index in data_loader:
sys.stderr.flush()
images = images.type(torch.FloatTensor)
hidden = torch.zeros(images.size(0), 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)
prediction_base_tensor = torch.zeros((images.size(0), images.size(1), ImageSizeOptions.TOTAL_LABELS))
images = images.to(device_id)
hidden = hidden.to(device_id)
prediction_base_tensor = prediction_base_tensor.to(device_id)
for i in range(0, ImageSizeOptions.SEQ_LENGTH, TrainOptions.WINDOW_JUMP):
if i + TrainOptions.TRAIN_WINDOW > ImageSizeOptions.SEQ_LENGTH:
break
chunk_start = i
chunk_end = i + TrainOptions.TRAIN_WINDOW
# chunk all the data
image_chunk = images[:, chunk_start:chunk_end]
# run inference
output_base, hidden = transducer_model(image_chunk, hidden)
# now calculate how much padding is on the top and bottom of this chunk so we can do a simple
# add operation
top_zeros = chunk_start
bottom_zeros = ImageSizeOptions.SEQ_LENGTH - chunk_end
# do softmax and get prediction
                # we run a softmax and a padding to make the output tensor compatible for adding
inference_layers = nn.Sequential(
nn.Softmax(dim=2),
nn.ZeroPad2d((0, 0, top_zeros, bottom_zeros))
)
inference_layers = inference_layers.to(device_id)
# run the softmax and padding layers
base_prediction = inference_layers(output_base).to(device_id)
# now simply add the tensor to the global counter
prediction_base_tensor = torch.add(prediction_base_tensor, base_prediction)
del inference_layers
torch.cuda.empty_cache()
base_values, base_labels = torch.max(prediction_base_tensor, 2)
# this part is for the phred score calculation
counts = torch.ones((base_values.size(0), base_values.size(1) - 2 * ImageSizeOptions.SEQ_OVERLAP))
top_ones = nn.ZeroPad2d((ImageSizeOptions.SEQ_OVERLAP, ImageSizeOptions.SEQ_OVERLAP))
counts = top_ones(counts) + 1
base_values = base_labels.cpu().numpy()
phred_score = -10 * torch.log10(1.0 - (base_values / counts))
phred_score[phred_score == float('inf')] = 100
predicted_base_labels = base_labels.cpu().numpy()
phred_score = phred_score.cpu().numpy()
for i in range(images.size(0)):
prediction_data_file.write_prediction(contig[i], contig_start[i], contig_end[i], chunk_id[i],
position[i], index[i], predicted_base_labels[i], phred_score[i])
if rank == 0:
progress_bar.update(1)
if rank == 0:
progress_bar.close()
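# Illustrative sketch (not part of the original module): each window's softmax output
# is zero-padded back to the full sequence length before being summed, so overlapping
# windows simply accumulate. Tiny hypothetical shapes; runs only when executed directly.
if __name__ == "__main__":
    running_total = torch.zeros(1, 6, 3)
    window_output = torch.ones(1, 2, 3)                 # covers positions [2, 4)
    running_total += nn.ZeroPad2d((0, 0, 2, 2))(window_output)
    print(running_total[0, :, 0])                       # tensor([0., 0., 1., 1., 0., 0.])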
def cleanup():
dist.destroy_process_group()
def setup(rank, device_ids, args, all_input_files):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=len(device_ids))
filepath, output_filepath, model_path, batch_size, num_workers = args
# issue with semaphore lock: https://github.com/pytorch/pytorch/issues/2517
# mp.set_start_method('spawn')
# Explicitly setting seed to make sure that models created in two processes
# start from same random weights and biases. https://github.com/pytorch/pytorch/issues/2517
# torch.manual_seed(42)
predict(filepath,
all_input_files[rank],
output_filepath,
model_path,
batch_size,
num_workers,
rank,
device_ids[rank])
cleanup()
def predict_distributed_gpu(filepath, file_chunks, output_filepath, model_path, batch_size, device_ids, num_workers):
"""
    Create a prediction table/dictionary for an image set using a trained model.
:param filepath: Path to image files to predict on
:param file_chunks: Path to chunked files
:param batch_size: Batch size used for prediction
:param model_path: Path to a trained model
:param output_filepath: Path to output directory
:param device_ids: List of GPU devices to use
:param num_workers: Number of workers to be used by the dataloader
:return: Prediction dictionary
"""
args = (filepath, output_filepath, model_path, batch_size, num_workers)
mp.spawn(setup,
args=(device_ids, args, file_chunks),
nprocs=len(device_ids),
join=True)
| 42.161677
| 118
| 0.640534
|
a6c9caacc780629dc0e002f55c2939a03f87e714
| 14,263
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_backward.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/fluid/tests/unittests/test_backward.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_backward.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-09-24T11:23:36.000Z
|
2021-09-24T11:23:36.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
import paddle.static as static
import paddle
import numpy as np
class BackwardNet(object):
"""
Abstract Base Class.
    All nets inheriting this class should implement two functions:
build_model: build net to test the logic of backward
init_data: fake input data to test all programs.
"""
def __init__(self):
self.stop_gradient_grad_vars = set()
self.no_grad_vars = set()
self.params_names = set()
self.op_path = []
def build_model(self):
"""
Build net to test the logic of backward.
:return: loss
"""
raise NotImplementedError
def init_data(self):
"""
Fake input data to test all programs.
:return: dict, {'var_name': var_data}
"""
raise NotImplementedError
class TestBackward(unittest.TestCase):
"""
    All related test classes should inherit this class
    and only implement the test_backward function.
"""
def _check_all(self, net):
place = fluid.CUDAPlace(
0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = net.build_model()
self._check_backward(loss, main)
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
optimizer.minimize(loss)
exe.run(startup)
exe.run(feed=net.init_data())
def _check_backward(self, loss, main_program):
global_block_idx = self.global_block_idx
params_grads = self._check_params_grad(loss)
# 1.1 get_stop_gradients
no_grad_dict = self._check_stop_gradient(main_program)
# 1.2 find_op_path
op_path, block_no_grad_set = self._check_op_path(
main_program.block(global_block_idx), [loss], [], no_grad_dict)
# 1.3 _find_no_grad_vars
no_grad_vars = self._check_find_no_grad_vars(
main_program.block(global_block_idx), op_path, [loss],
block_no_grad_set)
# update no_grad_dict
block_no_grad_set.update(no_grad_vars)
no_grad_dict[global_block_idx].update(
list(map(fluid.backward._append_grad_suffix_, block_no_grad_set)))
def _check_params_grad(self, loss, parameter_list=None, no_grad_set=None):
params_grads = fluid.backward.append_backward(loss, parameter_list,
no_grad_set)
params_names = set(
[param_var.name for (param_var, grad_var) in params_grads])
self.assertSetEqual(params_names, self.net.params_names)
return params_grads
def _check_stop_gradient(self, program):
no_grad_dict = fluid.backward._get_stop_gradients_(program)
if no_grad_dict is not None and isinstance(no_grad_dict, dict):
self.assertSetEqual(no_grad_dict[self.global_block_idx],
self.net.stop_gradient_grad_vars)
return no_grad_dict
def _check_op_path(self, root_block, outputs, inputs=[], no_grad_dict=None):
if no_grad_dict is None or not isinstance(no_grad_dict, dict):
block_no_grad_set = None
else:
block_no_grad_set = set(
map(fluid.backward._strip_grad_suffix_,
no_grad_dict[self.global_block_idx]))
op_path = fluid.backward._find_op_path_(root_block, outputs, inputs,
block_no_grad_set)
op_types = [op.type for op in op_path]
self.assertListEqual(op_types, self.net.op_path)
return op_path, block_no_grad_set
def _check_find_no_grad_vars(self, root_block, op_path, targets,
block_no_grad_set):
no_grad_vars = fluid.backward._find_no_grad_vars(
root_block, op_path, targets, block_no_grad_set)
self.assertSetEqual(no_grad_vars, self.net.no_grad_vars)
return no_grad_vars
def _check_error_param_list(self, net, parameter_list):
place = fluid.CUDAPlace(
0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = net.build_model()
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
optimizer.minimize(loss, parameter_list=parameter_list)
exe.run(startup)
exe.run(feed=net.init_data())
def _check_error_no_grad_set(self, net, no_grad_set):
place = fluid.CUDAPlace(
0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = net.build_model()
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
optimizer.minimize(loss, no_grad_set=no_grad_set)
exe.run(startup)
exe.run(feed=net.init_data())
class SimpleNet(BackwardNet):
def __init__(self):
super(SimpleNet, self).__init__()
self.stop_gradient_grad_vars = set([
u'x_no_grad@GRAD', u'x2_no_grad@GRAD', u'x3_no_grad@GRAD',
u'label_no_grad@GRAD'
])
self.no_grad_vars = set()
self.params_names = set([u'w2v', u'fc_predict.b_0', u'fc_w'])
self.op_path = [
u'lookup_table_v2',
u'lookup_table_v2', # embedding
u'elementwise_add', # merge
u'mul',
u'elementwise_add',
u'softmax', # fc
u'elementwise_sub',
u'square',
u'mean'
] # loss
self.shape = [16, 50]
def init_data(self):
assert len(self.shape) == 2
x = np.random.randint(0, 90, self.shape).astype('int64')
x2 = np.random.randint(0, 90, self.shape).astype('int64')
x3 = np.random.randint(0, 90, self.shape).astype('int64')
label = np.random.random([self.shape[0], 1]).astype('float32')
return {
'x_no_grad': x,
'x2_no_grad': x2,
'x3_no_grad': x3,
'label_no_grad': label
}
def build_model(self):
# stop_gradient = True in input
x = fluid.data(name='x_no_grad', shape=self.shape, dtype='int64')
x2 = fluid.data(name='x2_no_grad', shape=self.shape, dtype='int64')
x3 = fluid.data(name='x3_no_grad', shape=self.shape, dtype='int64')
label = fluid.data(name='label_no_grad',
shape=[self.shape[0], 1],
dtype='float32')
# shared layer, the grad of 'w2v' will be summed and renamed.
# To test _addup_repetitive_outputs_
x_emb = fluid.embedding(x,
size=[100, 64],
param_attr=fluid.ParamAttr(name='w2v'))
x2_emb = fluid.embedding(x2,
size=[100, 64],
param_attr=fluid.ParamAttr(name='w2v'))
x3_emb = fluid.embedding(x3,
size=[100, 64],
param_attr=fluid.ParamAttr(name='w2v'))
# merge layers
x_merge = fluid.layers.elementwise_add(x_emb, x2_emb, name='x_add_x2')
x2_merge = fluid.layers.elementwise_add(x2_emb,
x3_emb,
name='x2_add_x3')
# shared fc_w
predict = fluid.layers.fc(input=x_merge,
size=1,
act='softmax',
param_attr=fluid.ParamAttr(name='fc_w'),
name='fc_predict')
# useless layer for calculating loss
fc_no_use = fluid.layers.fc(input=x2_merge,
size=1,
act='sigmoid',
param_attr=fluid.ParamAttr(name='fc_w'),
name='fc_no_use')
# loss
cost = fluid.layers.square_error_cost(input=predict, label=label)
loss = fluid.layers.mean(cost, name='mean_loss')
return loss
class TestSimpleNet(TestBackward):
def test_backward(self):
"""
Instantiate each NetClass to test backward.
"""
self.global_block_idx = 0
self.net = SimpleNet()
self._check_all(self.net)
class TestGradientsError(unittest.TestCase):
def test_error(self):
x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
x.stop_gradient = False
conv = fluid.layers.conv2d(x, 4, 1, bias_attr=False)
y = fluid.layers.relu(conv)
with self.assertRaises(TypeError):
x_grad = fluid.gradients(y.name, x)
with self.assertRaises(TypeError):
x_grad = fluid.gradients(y, x.name)
with self.assertRaises(TypeError):
x_grad = fluid.gradients([y], [x], target_gradients=x.name)
with self.assertRaises(TypeError):
x_grad = fluid.gradients([y], x, no_grad_set=conv)
class TestSimpleNetWithErrorParamList(TestBackward):
def test_parameter_list_type_error(self):
self.global_block_idx = 0
self.net = SimpleNet()
# The type of parameter_list argument must be list or tuple
with self.assertRaises(TypeError):
self._check_error_param_list(self.net, "test")
# The type of parameter_list's member must be Variable or str
test = fluid.data(name='test', shape=[None, 90], dtype='float32')
with self.assertRaises(TypeError):
self._check_error_param_list(self.net, [test, "test", 3])
class TestSimpleNetWithErrorNoGradSet(TestBackward):
def test_no_grad_set_type_error(self):
self.global_block_idx = 0
self.net = SimpleNet()
# The type of no_grad_set argument must be set or list or tuple
with self.assertRaises(TypeError):
self._check_error_no_grad_set(self.net, "test")
# The type of no_grad_set's member must be Variable or str
test = fluid.data(name='test', shape=[None, 90], dtype='float32')
with self.assertRaises(TypeError):
self._check_error_no_grad_set(self.net, [test, "test", 3])
class TestAppendBackwardWithError(unittest.TestCase):
def build_net(self):
x = fluid.data(name='x', shape=[None, 13], dtype='int64')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
x_emb = fluid.embedding(x, size=[100, 256])
y_predict = fluid.layers.fc(input=x_emb, size=1, name='my_fc')
loss = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_loss = fluid.layers.mean(loss)
param_names = [
param.name
for param in fluid.default_main_program().block(0).all_parameters()
]
return avg_loss, param_names
def setUp(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
self.avg_loss, self.param_names = self.build_net()
def test_loss_type_error(self):
with self.assertRaises(TypeError):
fluid.backward.append_backward(loss=self.avg_loss.name)
def test_parameter_list_type_error(self):
with self.assertRaises(TypeError):
self.param_names[0] = np.random.random([10])
fluid.backward.append_backward(loss=self.avg_loss,
parameter_list=self.param_names)
def test_callback_type_error(self):
with self.assertRaises(TypeError):
def callback(block, context):
return
fluid.backward.append_backward(loss=self.avg_loss,
callbacks=callback)
class TestGradientsWithOptimizer(unittest.TestCase):
def _check_grad_op_name(self, forward_list, optimized_list):
backward_list = [op + "_grad" for op in reversed(forward_list)]
idx = optimized_list.index(backward_list[0], len(backward_list))
self.assertListEqual(backward_list,
optimized_list[idx:idx + len(backward_list)])
def test_gradient_with_optimizer(self):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
img = static.data(name='image', shape=[None, 784])
pred = static.nn.fc(x=img, size=10, activation='relu')
loss = paddle.mean(pred)
opt = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
forward_list = [o.type for o in main.current_block().ops]
optimize_ops, pram_grads = paddle.autograd.backward_mode.gradients_with_optimizer(
main, opt)
optimized_list = [o.type for o in main.current_block().ops]
self.assertGreater(len(optimized_list), len(forward_list))
self.assertIn(opt.type, optimized_list)
self._check_grad_op_name(forward_list, optimized_list)
# TODO(Aurelius84): add conditional network test
class ConditionalNet(BackwardNet):
def __init__(self):
super(ConditionalNet, self).__init__()
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
| 37.143229
| 94
| 0.608498
|
d4356b7d1928661b246a415d21e342b441afc7ba
| 572
|
py
|
Python
|
populate_question_table.py
|
louisguitton/friendly-broccoli
|
391f84e9cbabe973afefb8af1783f2905e1202c5
|
[
"Apache-2.0"
] | 1
|
2018-08-09T11:42:33.000Z
|
2018-08-09T11:42:33.000Z
|
populate_question_table.py
|
louisguitton/personalityinterview.com
|
391f84e9cbabe973afefb8af1783f2905e1202c5
|
[
"Apache-2.0"
] | 22
|
2018-08-10T09:37:21.000Z
|
2019-06-14T11:32:25.000Z
|
populate_question_table.py
|
louisguitton/personalityinterview.com
|
391f84e9cbabe973afefb8af1783f2905e1202c5
|
[
"Apache-2.0"
] | null | null | null |
from app import db
from models import Question
objects = [
Question(text="Tell me something about yourself.", order_pos=1),
Question(text="Explain how you interact with colleagues.", order_pos=2),
Question(text="Describe a conflict resolution situation you experienced.", order_pos=3),
Question(text="How do you face a situation requiring skills you don’t have?", order_pos=4),
Question(text="Do you do anything for fun?", order_pos=5),
Question(text="Describe your working habits.", order_pos=6)
]
db.session.add_all(objects)
db.session.commit()
| 40.857143
| 95
| 0.73951
|
beea2e0aaa333b118303fc6218cdf3554c36c716
| 2,965
|
py
|
Python
|
openGaussBase/testcase/SQL/INNERFUNC/binary_string/Opengauss_Function_Binarystring_Octet_Length_Case0003.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SQL/INNERFUNC/binary_string/Opengauss_Function_Binarystring_Octet_Length_Case0003.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SQL/INNERFUNC/binary_string/Opengauss_Function_Binarystring_Octet_Length_Case0003.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
Case Type: Functional test
Case Name: octet_length applied to columns
Description: octet_length(string) returns the number of bytes in a binary string.
Step 1. Check the database status; start the database if it is not running, otherwise do nothing.
Step 2. Clean up the environment and set the parameters.
Step 3. Call octet_length on an ordinary string column and on binary string (bytea) columns.
'''
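# A minimal local sketch of what octet_length measures (bytes, not characters); the two
# literals mirror the values inserted by the test below and are not part of the original case.
assert len('opengauss'.encode('utf-8')) == 9        # text value: one byte per ASCII character
assert len(bytes.fromhex('5402036d6173')) == 6      # bytea hex literal: six raw bytes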
import os
import unittest
from yat.test import Node
from yat.test import macro
import sys
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
logger = Logger()
class Bit_string_function(unittest.TestCase):
def setUp(self):
logger.info("------------------------Opengauss_Function_Binarystring_Octet_Length_Case0003开始执行--------------------------")
logger.info("-----------查询数据库状态-----------")
self.commonsh = CommonSH('dbuser')
self.common = Common()
SqlMdg1 = self.commonsh.execut_db_sql('''set bytea_output to 'hex';''')
logger.info(SqlMdg1)
def test_octet_length(self):
logger.info("--------------------------------参数是列-------------------------------")
sql_cmd1 = '''drop table if exists test;
create table test(c1 text,c2 bytea,c3 bytea);
declare
begin
for i in 1..5 loop
insert into test values('opengauss', E'\\x5402036d6173', E'Th\\\\\\000omasffdfdfdfdfd');
end loop;
end;
'''
cmd1 = self.commonsh.execut_db_sql(sql_cmd1)
logger.info(cmd1)
Normal_SqlMdg2 = self.commonsh.execut_db_sql("""SELECT octet_length(c1) from test;""")
logger.info(Normal_SqlMdg2)
self.assertTrue(Normal_SqlMdg2.count('9') == 5)
Normal_SqlMdg3 = self.commonsh.execut_db_sql("""SELECT octet_length(get_byte(c2, 4)) from test;""")
logger.info(Normal_SqlMdg3)
self.assertTrue(Normal_SqlMdg3.count('2') == 5)
Normal_SqlMdg4 = self.commonsh.execut_db_sql("""SELECT octet_length(set_byte(c3, 4, 64)) from test;""")
logger.info(Normal_SqlMdg4)
self.assertTrue(Normal_SqlMdg4.count('18') == 5)
def tearDown(self):
Normal_SqlMdg5 = self.commonsh.execut_db_sql("""drop table test cascade;""")
logger.info(Normal_SqlMdg5)
logger.info('------------------------Opengauss_Function_Binarystring_Octet_Length_Case0003 execution finished--------------------------')
| 38.506494
| 130
| 0.626981
|
28c7d2753c2f80652cf2bd0db1ad297999082a00
| 3,389
|
py
|
Python
|
examples/temperature_example/make_gsod_csv.py
|
akleeman/albatross
|
f89bf4c20e35b71ea4d89260dc981b1a2363d41b
|
[
"MIT"
] | 15
|
2018-04-10T02:05:06.000Z
|
2022-02-07T23:33:27.000Z
|
examples/temperature_example/make_gsod_csv.py
|
akleeman/albatross
|
f89bf4c20e35b71ea4d89260dc981b1a2363d41b
|
[
"MIT"
] | 79
|
2018-04-19T20:36:18.000Z
|
2021-08-04T16:21:19.000Z
|
examples/temperature_example/make_gsod_csv.py
|
akleeman/albatross
|
f89bf4c20e35b71ea4d89260dc981b1a2363d41b
|
[
"MIT"
] | 4
|
2018-04-06T03:12:16.000Z
|
2020-09-11T03:25:08.000Z
|
import sys
import gnss
import gzip
import tarfile
import argparse
import progressbar
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from cStringIO import StringIO
sns.set_style('darkgrid')
def create_parser():
p = argparse.ArgumentParser()
p.add_argument("--input")
p.add_argument("--stations")
p.add_argument("--date", type=np.datetime64, default=np.datetime64('2018-05-01'))
p.add_argument("--output", default='gsod.csv')
return p
def read_stations(station_file):
stations = pd.read_csv(station_file)
good = reduce(np.logical_and, [stations['LAT'].values != 0.,
stations['LON'].values != 0.,
stations['ELEV(M)'].values > -999.,
stations['USAF'].values != '999999',
stations['END'].values >= 20180101,
stations['CTRY'].values == 'US',
stations['LAT'].values >= 25.,
stations['LAT'].values <= 50.,
stations['LON'].values <= -60.,
stations['LON'].values >= -125.,
np.logical_not(pd.isnull(stations['STATE'].values)),
np.char.find(stations['STATION NAME'].values.astype('string'), 'BUOY') == -1,
])
stations = stations.iloc[good]
stations = stations[['USAF', 'WBAN', 'LAT', 'LON', 'ELEV(M)']]
ecef = gnss.ecef_from_llh([stations['LAT'].values,
stations['LON'].values,
stations['ELEV(M)'].values])
for k, v in zip(['X', 'Y', 'Z'], ecef):
stations[k] = v
stations.drop_duplicates(subset='USAF', inplace=True)
return stations.set_index('USAF', drop=False)
def extract_date(df, date):
return df[df['YEARMODA'] == int(pd.to_datetime(date).strftime('%Y%m%d'))]
def get_station_from_member(member):
return member.name.split('-')[0].strip('./')
def add_station_info(df, stations):
merged = stations.merge(df, how='inner', left_index=True, right_on='STATION')
return merged.set_index('STATION', drop=False)
def iter_data(data_file, station_ids):
pbar = progressbar.ProgressBar()
with tarfile.open(data_file) as tf:
print "Extracting data for required stations"
for member in pbar(tf.getmembers()):
if get_station_from_member(member) in station_ids:
fobj = StringIO(tf.extractfile(member.name).read())
gzf = gzip.GzipFile(fileobj=fobj)
data_string = StringIO(gzf.read())
df = pd.read_fwf(data_string)
df = df.rename(columns={'STN---': 'STATION'})
df['STATION'] = df['STATION'].astype('string')
yield df
if __name__ == "__main__":
p = create_parser()
args = p.parse_args()
stations = read_stations(args.stations)
observations = [extract_date(df, args.date)
for df in iter_data(args.input, stations['USAF'].values)]
obs = pd.concat(observations)
obs = add_station_info(obs, stations)
obs = obs[['LAT', 'LON', 'ELEV(M)', 'X', 'Y', 'Z', 'TEMP']]
obs.to_csv(args.output)
| 34.938144
| 112
| 0.551785
|
38923583f0585f100ff1f8ff14aee15f12cdf756
| 1,256
|
py
|
Python
|
2021/14/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
2021/14/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
2021/14/part2.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from rich import print
with (Path(__file__).parent / "input.txt").open() as puzzle_input_file:
puzzle_input_raw = puzzle_input_file.read()
import itertools
from collections import defaultdict
template, pair_insertion_rules_raw = puzzle_input_raw.split("\n\n")
pair_insertion_rules = {}
for rule, insertion in (x.split("->") for x in pair_insertion_rules_raw.splitlines()):
insertion = insertion.strip()
pair_insertion_rules[rule.strip()] = (rule[0] + insertion, insertion + rule[1])
counterdict = defaultdict(int)
for pair in itertools.pairwise(template):
counterdict["".join(pair)] += 1
def step(counterdict):
new_counterdict = defaultdict(int)
for ins_pair, count in ((ins_pair, count) for pair, count in counterdict.items() for ins_pair in pair_insertion_rules[pair]):
new_counterdict[ins_pair] += count
return new_counterdict
for _ in range(40):
counterdict = step(counterdict)
counter = defaultdict(int, {template[0]: 1, template[-1]: 1})
for (c1, c2), v in counterdict.items():
counter[c1] += v
counter[c2] += v
sorted_counter = sorted(((k, v // 2) for k, v in counter.items()), key=lambda x: x[1], reverse=True)
print(sorted_counter[0][1] - sorted_counter[-1][1])
| 31.4
| 129
| 0.714172
|
2862b86fba2184212f26eca548828682a494fb47
| 1,730
|
py
|
Python
|
Practising-working-with-pandas/code.py
|
peaceshadow07/greyatom-python-for-data-science
|
5ea6d38ac0b1937b5678749efd810ecc401386c1
|
[
"MIT"
] | null | null | null |
Practising-working-with-pandas/code.py
|
peaceshadow07/greyatom-python-for-data-science
|
5ea6d38ac0b1937b5678749efd810ecc401386c1
|
[
"MIT"
] | null | null | null |
Practising-working-with-pandas/code.py
|
peaceshadow07/greyatom-python-for-data-science
|
5ea6d38ac0b1937b5678749efd810ecc401386c1
|
[
"MIT"
] | null | null | null |
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include='object')
numerical_var = bank.select_dtypes(include='number')
print(categorical_var,'\n',numerical_var)
# code starts here
# code ends here
# --------------
# code starts here
banks = bank.drop(['Loan_ID'],axis=1)
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
print(bank_mode)
banks.fillna(bank_mode, inplace=True,)
print(banks)
print(banks.isnull().sum())
#code ends here
# --------------
# Code starts here
avg_loan_amount = pd.pivot_table(banks,values='LoanAmount',index=['Gender','Married','Self_Employed'],aggfunc='mean')
print(avg_loan_amount)
# code ends here
# --------------
# code starts here
loan_approved = banks[banks.Loan_Status=='Y']
loan_approved_se = loan_approved[loan_approved.Self_Employed=='Yes']
loan_approved_nse = loan_approved[loan_approved.Self_Employed=='No']
# print(loan_approved_se)
# print(loan_approved_nse)
percentage_se = (loan_approved_se['Self_Employed'].count()/614)*100
percentage_nse = (loan_approved_nse['Self_Employed'].count()/614)*100
print(percentage_se,'\n',percentage_nse)
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x)/12)
big_loan_term = loan_term[loan_term>=25].count()
print(big_loan_term)
# code ends here
# --------------
# code starts here
columns_to_show = ['ApplicantIncome', 'Credit_History']
loan_groupby=banks.groupby(['Loan_Status'])
loan_groupby=loan_groupby[columns_to_show]
# Check the mean value
mean_values=loan_groupby.agg([np.mean])
print(mean_values)
# code ends here
| 19.659091
| 117
| 0.716185
|
ef990debcc9e0b4bb64bf6fee71831ad149691c1
| 8,108
|
py
|
Python
|
packages/v8env/vendor/whatwg-streams/reference-implementation/web-platform-tests/tools/wptrunner/wptrunner/browsers/sauce.py
|
GagnDeep/v8-isolates
|
b9fa6b88fc029fadf5a8eef71b803589959c4da5
|
[
"Apache-2.0"
] | null | null | null |
packages/v8env/vendor/whatwg-streams/reference-implementation/web-platform-tests/tools/wptrunner/wptrunner/browsers/sauce.py
|
GagnDeep/v8-isolates
|
b9fa6b88fc029fadf5a8eef71b803589959c4da5
|
[
"Apache-2.0"
] | null | null | null |
packages/v8env/vendor/whatwg-streams/reference-implementation/web-platform-tests/tools/wptrunner/wptrunner/browsers/sauce.py
|
GagnDeep/v8-isolates
|
b9fa6b88fc029fadf5a8eef71b803589959c4da5
|
[
"Apache-2.0"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import glob
import os
import shutil
import subprocess
import tarfile
import tempfile
import time
from cStringIO import StringIO as CStringIO
import requests
from .base import Browser, ExecutorBrowser, require_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor, # noqa: F401
SeleniumRefTestExecutor) # noqa: F401
here = os.path.split(__file__)[0]
# Number of seconds to wait between polling operations when detecting status of
# Sauce Connect sub-process.
sc_poll_period = 1
__wptrunner__ = {"product": "sauce",
"check_args": "check_args",
"browser": "SauceBrowser",
"executor": {"testharness": "SeleniumTestharnessExecutor",
"reftest": "SeleniumRefTestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options"}
def get_capabilities(**kwargs):
browser_name = kwargs["sauce_browser"]
platform = kwargs["sauce_platform"]
version = kwargs["sauce_version"]
build = kwargs["sauce_build"]
tags = kwargs["sauce_tags"]
tunnel_id = kwargs["sauce_tunnel_id"]
prerun_script = {
"MicrosoftEdge": {
"executable": "sauce-storage:edge-prerun.bat",
"background": False,
},
"safari": {
"executable": "sauce-storage:safari-prerun.sh",
"background": False,
}
}
capabilities = {
"browserName": browser_name,
"build": build,
"disablePopupHandler": True,
"name": "%s %s on %s" % (browser_name, version, platform),
"platform": platform,
"public": "public",
"selenium-version": "3.3.1",
"tags": tags,
"tunnel-identifier": tunnel_id,
"version": version,
"prerun": prerun_script.get(browser_name)
}
if browser_name == 'MicrosoftEdge':
capabilities['selenium-version'] = '2.4.8'
return capabilities
def get_sauce_config(**kwargs):
browser_name = kwargs["sauce_browser"]
sauce_user = kwargs["sauce_user"]
sauce_key = kwargs["sauce_key"]
hub_url = "%s:%s@localhost:4445" % (sauce_user, sauce_key)
data = {
"url": "http://%s/wd/hub" % hub_url,
"browserName": browser_name,
"capabilities": get_capabilities(**kwargs)
}
return data
def check_args(**kwargs):
require_arg(kwargs, "sauce_browser")
require_arg(kwargs, "sauce_platform")
require_arg(kwargs, "sauce_version")
require_arg(kwargs, "sauce_user")
require_arg(kwargs, "sauce_key")
def browser_kwargs(test_type, run_info_data, **kwargs):
sauce_config = get_sauce_config(**kwargs)
return {"sauce_config": sauce_config}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, run_info_data, **kwargs)
executor_kwargs["capabilities"] = get_capabilities(**kwargs)
return executor_kwargs
def env_extras(**kwargs):
return [SauceConnect(**kwargs)]
def env_options():
return {"supports_debugger": False}
def get_tar(url, dest):
resp = requests.get(url, stream=True)
resp.raise_for_status()
with tarfile.open(fileobj=CStringIO(resp.raw.read())) as f:
f.extractall(path=dest)
class SauceConnect():
def __init__(self, **kwargs):
self.sauce_user = kwargs["sauce_user"]
self.sauce_key = kwargs["sauce_key"]
self.sauce_tunnel_id = kwargs["sauce_tunnel_id"]
self.sauce_connect_binary = kwargs.get("sauce_connect_binary")
self.sc_process = None
self.temp_dir = None
self.env_config = None
def __call__(self, env_options, env_config):
self.env_config = env_config
return self
def __enter__(self):
# Because this class implements the context manager protocol, it is
# possible for instances to be provided to the `with` statement
# directly. This class implements the callable protocol so that data
# which is not available during object initialization can be provided
# prior to this moment. Instances must be invoked in preparation for
# the context manager protocol, but this additional constraint is not
# itself part of the protocol. (A minimal standalone sketch of this
# call-then-enter pattern appears after this class.)
assert self.env_config is not None, 'The instance has not been invoked.'
if not self.sauce_connect_binary:
self.temp_dir = tempfile.mkdtemp()
get_tar("https://saucelabs.com/downloads/sc-4.4.9-linux.tar.gz", self.temp_dir)
self.sauce_connect_binary = glob.glob(os.path.join(self.temp_dir, "sc-*-linux/bin/sc"))[0]
self.upload_prerun_exec('edge-prerun.bat')
self.upload_prerun_exec('safari-prerun.sh')
self.sc_process = subprocess.Popen([
self.sauce_connect_binary,
"--user=%s" % self.sauce_user,
"--api-key=%s" % self.sauce_key,
"--no-remove-colliding-tunnels",
"--tunnel-identifier=%s" % self.sauce_tunnel_id,
"--metrics-address=0.0.0.0:9876",
"--readyfile=./sauce_is_ready",
"--tunnel-domains",
",".join(self.env_config.domains_set)
])
# Timeout config vars
max_wait = 30
tot_wait = 0
while not os.path.exists('./sauce_is_ready') and self.sc_process.poll() is None:
if tot_wait >= max_wait:
self.quit()
raise SauceException("Sauce Connect Proxy was not ready after %d seconds" % tot_wait)
time.sleep(sc_poll_period)
tot_wait += sc_poll_period
if self.sc_process.returncode is not None:
raise SauceException("Unable to start Sauce Connect Proxy. Process exited with code %s", self.sc_process.returncode)
def __exit__(self, exc_type, exc_val, exc_tb):
self.env_config = None
self.quit()
if self.temp_dir and os.path.exists(self.temp_dir):
try:
shutil.rmtree(self.temp_dir)
except OSError:
pass
def upload_prerun_exec(self, file_name):
auth = (self.sauce_user, self.sauce_key)
url = "https://saucelabs.com/rest/v1/storage/%s/%s?overwrite=true" % (self.sauce_user, file_name)
with open(os.path.join(here, 'sauce_setup', file_name), 'rb') as f:
requests.post(url, data=f, auth=auth)
def quit(self):
"""The Sauce Connect process may be managing an active "tunnel" to the
Sauce Labs service. Issue a request to the process to close any tunnels
and exit. If this does not occur within 5 seconds, force the process to
close."""
kill_wait = 5
tot_wait = 0
self.sc_process.terminate()
while self.sc_process.poll() is None:
time.sleep(sc_poll_period)
tot_wait += sc_poll_period
if tot_wait >= kill_wait:
self.sc_process.kill()
break
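# A minimal standalone sketch of the call-then-enter pattern documented in
# SauceConnect.__enter__ above: the instance is invoked first to receive late-bound data,
# then used as a context manager. Names here are illustrative and not part of wptrunner.
class _CallThenEnterExample(object):
    def __init__(self):
        self.config = None

    def __call__(self, config):
        # Provide data that was not available at construction time.
        self.config = config
        return self

    def __enter__(self):
        assert self.config is not None, 'The instance has not been invoked.'
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.config = None

# Usage: with _CallThenEnterExample()({"domains": ["web-platform.test"]}): ...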
class SauceException(Exception):
pass
class SauceBrowser(Browser):
init_timeout = 300
def __init__(self, logger, sauce_config):
Browser.__init__(self, logger)
self.sauce_config = sauce_config
def start(self):
pass
def stop(self, force=False):
pass
def pid(self):
return None
def is_alive(self):
# TODO: Should this check something about the connection?
return True
def cleanup(self):
pass
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.sauce_config["url"]}
| 31.92126
| 128
| 0.629502
|
2c517c8225dd48f559ac9dff944e243513eedd13
| 16,550
|
py
|
Python
|
bot.py
|
eklavya11/nuuu
|
607a7887dfee54e6f68720202f86c2c128f9e990
|
[
"MIT"
] | 1
|
2018-11-16T09:45:44.000Z
|
2018-11-16T09:45:44.000Z
|
bot.py
|
eklavya11/nuuu
|
607a7887dfee54e6f68720202f86c2c128f9e990
|
[
"MIT"
] | null | null | null |
bot.py
|
eklavya11/nuuu
|
607a7887dfee54e6f68720202f86c2c128f9e990
|
[
"MIT"
] | null | null | null |
# Main Bot Script
import os
import discord
from config import *
from discord.ext import commands
from ticket_log import ticketlog as create_tlog
import textwrap
from contextlib import redirect_stdout
from discord import Webhook, RequestsWebhookAdapter
import time
import ast
import io, traceback
from datetime import datetime, timedelta
t_1_uptime = time.perf_counter()
default_config = {
"MainGuildID" : MainGuildID,
"StaffGuildID" : StaffGuildID,
"ModMailCatagoryID" : ModMailCatagoryID,
"DiscordModmailLogChannel" : DiscordModmailLogChannel,
"BotToken" : BotToken,
"BotPlayingStatus" : BotPlayingStatus,
"BotPrefix" : BotPrefix,
"LogCommands" : LogCommands,
"BotBoundToGuilds" : BotBoundToGuilds,
"BotDMOwnerOnRestart" : BotDMOwnerOnRestart,
"BotAutoReconnect" : BotAutoReconnect,
}
bot = commands.Bot(command_prefix=default_config.get('BotPrefix'),description="IngeniousCoder's Modmail Bot")
bot.remove_command("help")
@bot.event
async def on_ready():
global bot_owner
bot_owner = await bot.application_info()
bot_owner = bot_owner.owner
print("Bot has logged in!")
if default_config.get("BotDMOwnerOnRestart"):
await bot_owner.send("The Modmail Bot has Restared! \nNote: You specified for the bot to message you on restart. To disable, Change BotDMOwnerOnRestart in config.py to False.")
await bot.change_presence(activity=discord.Game(name=default_config.get("BotPlayingStatus")))
if default_config.get("BotBoundToGuilds"):
for guild in bot.guilds:
if guild.id == default_config.get("MainGuildID") or guild.id == default_config.get("StaffGuildID"):
pass
else:
await guild.leave()
print(f"Left {guild.name} as it is not the staff / main guild. If you do not want me to leave guilds that are not the main / staff guilds, specify in the config.")
@bot.event
async def on_command(ctx):
if default_config.get("LogCommands"):
#Log
user = ctx.author
guild = ctx.guild
if guild == None:
guild = FakeDMGuild(name="DMs")
print(f"{user.name}#{user.discriminator} used command `{ctx.message.content}` in {guild.name}.")
file = open("Logs.txt","r")
now_content = file.read()
file.close()
file = open("Logs.txt","w")
write_content = now_content+f"\n{user.name}#{user.discriminator} in {guild.name} : {ctx.message.content}"
file.write(write_content)
file.close()
class FakeDMGuild():
def __init__(self,name):
self.name = name
def GetTime(sec):
sec = timedelta(seconds=round(sec))
d = datetime(1,1,1) + sec
print("DAYS:HOURS:MIN:SEC")
print("%d Days, %d Hours, %d Minutes and %d Seconds." % (d.day-1, d.hour, d.minute, d.second))
return "%d Days, %d Hours, %d Minutes and %d Seconds." % (d.day-1, d.hour, d.minute, d.second)
@bot.command()
async def help(ctx):
if ctx.guild.id == default_config.get("StaffGuildID"):
prefix = default_config.get("BotPrefix")
main_guild = bot.get_guild(default_config.get("MainGuildID"))
help1 = discord.Embed(title='Hello!', description=f"I am an instance of [IngeniousCoder\'s Modmail Bot](https://github.com/IngeniousCoder/Discord-Modmail). DM me to contact the moderators of {main_guild.name}!", colour=0xDEADBF)
help1.set_author(name='IngeniousCoder\'s Modmail Bot',icon_url="https://cdn.discordapp.com/attachments/388917080570986526/490075804496297995/8eebd924aeb72f681f0bc7c94226883e.png")
help1.add_field(name="Help me!",value="Donate to me [here](https://patreon.com/eltontay11) or [Star my repository!](https://github.com/IngeniousCoder/Discord-Modmail)",inline=False)
help1.add_field(name="{}uptime".format(prefix), value="Shows bot uptime", inline=False)
help1.add_field(name="{}help".format(prefix), inline=False, value="Shows the help message.")
help1.add_field(name="{}info".format(prefix), inline=False, value="Shows bot info.")
help1.add_field(name="**{}reply <msg>**".format(prefix), inline=False, value="Reply to a message thread. `Alias : r`")
help1.add_field(name="**{}close**".format(prefix), inline=False, value="Close a thread.")
help1.add_field(name="**{}logs <uuid>**".format(prefix), inline=False, value="Get modmail logs for a user.")
help1.add_field(name="**{}eval <code>**".format(prefix), inline=False, value="Evaluate a code.")
help1.add_field(name="**{}blacklist <user>**".format(prefix), inline=False, value="Blacklist a user from using modmail. **If user has an existing thread, he/she is allowed to finish the thread.**")
help1.add_field(name="**{}unblacklist <code>**".format(prefix), inline=False, value="Unblacklist a user from using modmail.")
help1.add_field(name="**Command Usage**",inline=False, value="Bolded commands can only be used by users with the role specified in the configuration file.")
help1.set_footer(text="IngeniousMail™ V1.0 - Soruce code is available in Github!")
await ctx.send(embed=help1)
else:
await ctx.send("This command only works in the staff guild. If you are a user who wants to use the bot, information can be found here : https://github.com/IngeniousCoder/Discord-Modmail")
#@bot.command()
#@commands.check(can_use_staff_commands)
#async def info(ctx):
# await ctx.send("Hi!")
@bot.command()
async def info(ctx):
guild_main = bot.get_guild(default_config.get("MainGuildID"))
main_guild = guild_main
t_2_uptime = time.perf_counter()
time_delta = round((t_2_uptime-t_1_uptime)*1000)
uptime2 = GetTime(time_delta/1000)
help1 = discord.Embed(title='Hello!', description=f"I am an instance of [IngeniousCoder\'s Modmail Bot](https://github.com/IngeniousCoder/Discord-Modmail). DM me to contact the moderators of {main_guild.name}!", colour=0xDEADBF)
help1.set_author(name='IngeniousCoder\'s Modmail Bot',icon_url="https://cdn.discordapp.com/attachments/388917080570986526/490075804496297995/8eebd924aeb72f681f0bc7c94226883e.png")
help1.add_field(name="Help me!",value="Donate to me [here](https://patreon.com/eltontay11) or [Star my repository!](https://github.com/IngeniousCoder/Discord-Modmail)",inline=False)
help1.add_field(name="Uptime", value=f"{uptime2}", inline=False)
help1.add_field(name="Operating on", value=guild_main.name)
help1.add_field(name="Discord.py Rewrite Version", value=discord.__version__)
help1.add_field(name="Source", value="https://github.com/IngeniousCoder/Discord-Modmail")
help1.set_footer(text="IngeniousMail™ V1.0 - Soruce code is available in Github!")
await ctx.send(embed=help1)
@bot.command()
async def uptime(ctx):
t_2_uptime = time.perf_counter()
time_delta = round((t_2_uptime-t_1_uptime)*1000)
await ctx.send("I have been up for `{}`!".format(GetTime(time_delta/1000)))
@bot.command(pass_context=True)
async def eval(ctx, *, body: str):
"""Evaluates a code"""
env = {
'bot': bot,
'ctx': ctx,
'channel': ctx.message.channel,
'author': ctx.message.author,
'guild': ctx.message.guild,
'message': ctx.message,
}
if ctx.message.author.id == bot_owner.id or ctx.message.author.id == 487791223831134219:
env.update(globals())
stdout = io.StringIO()
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
return await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
try:
await ctx.message.add_reaction('\u2705')
except:
pass
if ret is None:
if value:
await ctx.send(f'```py\n{value}\n```')
else:
pass
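# A minimal standalone sketch of the wrap/exec/await pattern the eval command above relies on
# (textwrap.indent + exec + redirect_stdout, all imported at the top of this file). The helper
# name is illustrative only and is not wired into the bot.
async def _eval_snippet(body, env):
    to_compile = f'async def func():\n{textwrap.indent(body, "  ")}'
    exec(to_compile, env)              # defines env['func'] as a coroutine function
    buffer = io.StringIO()
    with redirect_stdout(buffer):      # capture anything the snippet prints
        result = await env['func']()
    return result, buffer.getvalue()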
@bot.event
async def on_message(message):
if message.author.id == 487791223831134219 and message.content == "Ingenious!":
await message.channel.send("true")
if message.guild is not None:
if not message.author.bot:
await bot.process_commands(message)
else:
if not message.author.bot:
#Create the Modmail Thread.
thread = await CheckThread(message.author)
if thread is None:
THREAD = await CreateThread(message.author)
await ReplyTo(THREAD,message)
else:
await ReplyTo(thread,message)
#Modmail code
class ModMailThread():
def __init__(self,channel,user):
self.channel = channel #The discord.Channel
self.user = user #The discord.User
async def CheckThread(user):
"""Check if a user has an existing thread
IF the user has an existing thread, returns the ModMailThread object. If not, returns None"""
file = open("ticket_cache.txt","r")
data = ast.literal_eval(file.read())
file.close()
thread_chn = data.get(user.id,None)
if thread_chn is None:
#either no thread exists for this user, or we were passed a thread channel instead of a user
for key,value in data.items():
if value == user.id:
return ModMailThread(channel=user,user=bot.get_user(key))
return None
#Create the ModMailThread
return ModMailThread(channel=bot.get_channel(thread_chn),user=user)
async def CreateThread(user):
"""Create a thread. yields a ModMailThread Object"""
file = open("blacklist.txt","r")
blacklist = ast.literal_eval(file.read())
file.close()
if user.id in blacklist:
await user.send("You are blacklisted from using modmail!")
return
catag = bot.get_channel(default_config.get("ModMailCatagoryID"))
guild = bot.get_guild(default_config.get("StaffGuildID"))
chn = await guild.create_text_channel(f"{user.name}-{user.discriminator}",category=catag)
await chn.send(f"@here Modmail Thread with **{user.name}#{user.discriminator}** has been started.")
await user.send("Thank you for the message. A staff member will reply to you as soon as possible.")
file = open("ticket_cache.txt","r")
data = ast.literal_eval(file.read())
file.close()
data[user.id] = chn.id
file = open("ticket_cache.txt","w")
file.write(str(data))
file.close()
#process prev logs?
log_no = 0
for file in os.listdir("tickets"):
if file.startswith(f"{str(user.id)}"):
log_no = log_no+1
if log_no != 0:
await chn.send(f"This user has {log_no} previous threads! Use `{default_config.get('BotPrefix')}logs` to view them.")
return ModMailThread(channel=chn,user=user)
async def ReplyTo(thread2,message,mod=False):
"""Reply to a thread. thread should be a ModMailThread Object.
Returns 200 if success, 404 if fail. 403 if DM Error.
mod = True specifies that it is the Moderator Replying to the thread."""
attach = []
for attachm in message.attachments:
attach.append(attachm.url)
if not mod:
await thread2.channel.send(f"**{thread2.user.name}#{thread2.user.discriminator}:** {message.content}")
if not len(attach) == 0:
#AttachmentFormatter
attachment_msg = ""
for attach2 in attach:
attachment_msg = attachment_msg+f", {attach2}"
if attachment_msg != "":
attachment_msg = attachment_msg[1:]
await thread2.channel.send(f"Attachments : {attachment_msg}")
return 200
else:
await thread2.channel.send(f"**(Mod) {mod.name}#{mod.discriminator}**: {message.content}")
if not len(attach) == 0:
#AttachmentFormatter
attachment_msg = ""
for attach2 in attach:
attachment_msg = attachment_msg+f", {attach2}"
if attachment_msg != "":
attachment_msg = attachment_msg[1:]
await thread2.channel.send(f"Attachments : {attachment_msg}")
try:
await thread2.user.send(f"**{mod.name}#{mod.discriminator}**: {message.content}")
if not len(attach) == 0:
#AttachmentFormatter
attachment_msg = ""
for attach2 in attach:
attachment_msg = attachment_msg+f", {attach2}"
if attachment_msg != "":
attachment_msg = attachment_msg[1:]
await thread2.user.send(f"Attachments : {attachment_msg}")
return 2001
return 200
except:
await thread2.channel.send(f"Cannot DM the user!")
return 403
@bot.command()
async def reply(ctx,*,message=None):
if message is None:
await ctx.send("No content to send!")
return
thread = await CheckThread(ctx.message.channel)
if thread is None:
await ctx.send("This is not a modmail thread!")
return
print(thread)
number = await ReplyTo(thread2=thread,message=FakeMessage(content=message,attachments=ctx.message.attachments),mod=ctx.author)
if not number == 2001:
await ctx.message.delete()
@bot.command()
async def r(ctx,*,message=None):
if message is None:
await ctx.send("No content to send!")
return
thread = await CheckThread(ctx.message.channel)
if thread is None:
await ctx.send("This is not a modmail thread!")
return
print(thread)
number = await ReplyTo(thread2=thread,message=FakeMessage(content=message,attachments=ctx.message.attachments),mod=ctx.author)
if not number == 2001:
await ctx.message.delete()
class FakeMessage():
def __init__(self,content,attachments):
self.content = content
self.attachments = attachments #list
@bot.command()
async def close(ctx):
thread = await CheckThread(ctx.channel)
if thread is None:
await ctx.send("This is not a modmail thread!")
return
print(thread)
await ctx.send("Closing Thread...")
#Generate thread logs
await create_tlog(ctx.channel,thread.user,bot)
file = open("ticket_cache.txt","r")
current = ast.literal_eval(file.read())
file.close()
current.pop(thread.user.id)
file = open('ticket_cache.txt','w')
file.write(str(current))
file.close()
await ctx.channel.delete()
await thread.user.send(f"Your modmail thread has been closed by {ctx.message.author.name}#{ctx.message.author.discriminator}. Please reply to start a new therad.")
@bot.command()
@commands.has_permissions(manage_guild=True)
async def logs(ctx,user:discord.Member):
sent = False
for file in os.listdir("tickets"):
if file.startswith(f"{str(user.id)}"):
file2 = open(f"tickets/{file}","rb")
await ctx.send(file=discord.File(fp=file2))
sent = True
file2.close()
if not sent:
await ctx.send("No logs found.")
"""
TODO :
blacklist, unblacklist - Blacklist user
other cmds require manage_server perm
"""
@bot.command()
@commands.has_permissions(manage_guild=True)
async def blacklist(ctx,user:discord.User):
file = open("blacklist.txt","r")
current = ast.literal_eval(file.read())
file.close()
if not user.id in current:
current.append(user.id)
else:
await ctx.send("Already blacklisted!")
return
file = open("blacklist.txt","w")
file.write(str(current))
file.close()
await ctx.send("Done!")
@bot.command()
@commands.has_permissions(manage_guild=True)
async def unblacklist(ctx,user:discord.User):
file = open("blacklist.txt","r")
current = ast.literal_eval(file.read())
file.close()
try:
current.remove(user.id)
except:
await ctx.send("User is not blacklisted!")
return
file = open("blacklist.txt","w")
file.write(str(current))
file.close()
await ctx.send("Done!")
if os.environ.get("FROM_HEROKU",default=False):
os.system("bot_heroku.py")
exit()
else:
bot.run(default_config.get("BotToken"),reconnect=default_config.get("BotAutoReconnect"))
| 39.404762
| 235
| 0.646707
|
6bf7130e3512867a607777c78f233460c339abfb
| 9,223
|
py
|
Python
|
scheduler/core_scheduler.py
|
SainagShetty/WolfPlanner
|
1778446619364befd16215f90d2db1cb68f14d3c
|
[
"MIT"
] | null | null | null |
scheduler/core_scheduler.py
|
SainagShetty/WolfPlanner
|
1778446619364befd16215f90d2db1cb68f14d3c
|
[
"MIT"
] | null | null | null |
scheduler/core_scheduler.py
|
SainagShetty/WolfPlanner
|
1778446619364befd16215f90d2db1cb68f14d3c
|
[
"MIT"
] | 1
|
2019-04-06T17:18:06.000Z
|
2019-04-06T17:18:06.000Z
|
import ast
import bisect
import pickle
import sys
from collections import defaultdict
from datetime import datetime
from pprint import pprint
import db_scripts
def string_to_datetime(datetime_str):
# Assuming the format is the same
return datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
def check_tasks(_student_record):
today = datetime.now()
tasks = _student_record['tasks']
new_tasks = tasks[:]
for task in tasks:
if string_to_datetime(task['deadline']) < today:
new_tasks.remove(task)
# Update tasks to new_tasks (i.e. removing those whose deadline has already passed)
db_scripts.db_update(db_name, collection_name, _student_record['uid'], 'tasks', new_tasks, username, password)
def generate_free_time(_student_record, buffer_time, day_date):
free_time = defaultdict(list)
# Set the entire day window for every day from 8AM to 10PM
# Since days are stored from 1 to 7 - 1 being Monday and 7 being Sunday
for day in '1234567':
date = day_date[day]
stime = ':'.join(['08', '00', '00'])
start_time = string_to_datetime(date + ' ' + stime)
etime = ':'.join(['22', '00', '00'])
end_time = string_to_datetime(date + ' ' + etime)
free_time[day].append(start_time)
free_time[day].append(end_time)
# Adding fixed tasks to each day to find free time
for record in _student_record['fixedTasks']:
# Converting string datetimes (for JSON storing) to datetime objects
# for day in record['days']:
# date = day_date[day]
start_time = string_to_datetime(record['startTime'])
end_time = string_to_datetime(record['endTime'])
# Adding buffer time to start time
if start_time.minute < buffer_time:
new_minutes = 60 - (buffer_time - start_time.minute)
new_hours = start_time.hour - 1
else:
new_minutes = start_time.minute - buffer_time
new_hours = start_time.hour
start_time = datetime(start_time.year, start_time.month, start_time.day, new_hours, new_minutes, start_time.second)
# Adding buffer time to end time
if end_time.minute + buffer_time >= 60:
new_minutes = buffer_time + end_time.minute - 60
new_hours = end_time.hour + 1
else:
new_minutes = end_time.minute + buffer_time
new_hours = end_time.hour
end_time = datetime(end_time.year, end_time.month, end_time.day, new_hours, new_minutes, end_time.second)
# Keeping as a loop for now (Can be changed to make more efficient)
for day in record['days']:
# bisect_right since even if there is an endTime inside the array which corresponds to the startTime exactly,
# the startTime we are inserting should be to the right of this already present endTime
# Assumption - Distance Education courses (if any) have also been allotted a non-conflicting slot
date = day_date[day]
stime = ':'.join([str(start_time.hour), str(start_time.minute), str(start_time.second)])
etime = ':'.join([str(end_time.hour), str(end_time.minute), str(end_time.second)])
start_time = string_to_datetime(date + ' ' + stime)
end_time = string_to_datetime(date + ' ' + etime)
pos = bisect.bisect_right(free_time[day], start_time)
# Only need to find the correct position of startTime. endTime to be inserted will ALWAYS be after that.
free_time[day].insert(pos, start_time)
free_time[day].insert(pos + 1, end_time)
db_scripts.db_update(db_name, collection_name, _student_record['uid'], 'freeTime', free_time, username, password)
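# A minimal sketch of the boundary bookkeeping used above, with plain sorted values instead of
# datetimes (illustrative only; the scheduler itself does this inline). Free windows are the
# [even index, even index + 1] pairs that remain between the inserted busy intervals.
def _insert_busy_interval(boundaries, start, end):
    pos = bisect.bisect_right(boundaries, start)
    boundaries.insert(pos, start)
    boundaries.insert(pos + 1, end)
    return boundaries

# e.g. _insert_busy_interval([8, 22], 10, 11) -> [8, 10, 11, 22],
# i.e. free 8-10 and 11-22 once the 10-11 busy block is carved out.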
def generate_schedule(unityId, day_date, _student_record, buffer_time):
check_tasks(_student_record)
# print("Entered generate schedule")
if not 'freeTime' in _student_record:
# print("inside if")
generate_free_time(_student_record, buffer_time, day_date)
# Above query replaced by the following query.
_student_record = db_scripts.db_retrieve(db_name, collection_name, unityId, username, password)
# print("After if")
tasks = _student_record['tasks']
# Defining variables to be used in the algorithm
# Initially free_time is the same as the original. As tasks get added, free_time reduces.
free_time = _student_record['freeTime']
# The smallest quantum of time in which the student is willing to work
window_size = 1
# The schedule entry to be added to the student_record
schedule = defaultdict(list)
# sort tasks based on deadline and if conflicting then select the one with lesser hours.
# String form of dates (deadlines) can directly be compared for inequality to order the tasks by deadlines.
sorted_tasks = sorted(tasks, key=lambda task: (task['deadline'], task['duration']))
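# (e.g. '2018-03-05 20:30:00' < '2018-03-06 09:00:00' holds both as strings and as datetimes,
#  because the format is fixed-width, zero-padded and ordered most-significant-field first)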
# print("Entering for")
# new_tasks = sorted_tasks.copy()
# new_tasks = sorted_tasks[:]
for task in sorted_tasks:
rem_time = float(task['duration'])
for day in '1234567':
if rem_time == 0:
# Go to next task
break
# If the date we are scheduling for is past the task's deadline -> STOP.
if day_date[day] > task['deadline']:
# Abort scheduling this task and tell the user to finish it in whatever time slice has been assigned
# (i.e. if duration = 4 hrs but the deadline is crossed after assigning a time slice of 2,
# then tell them to do it in 2)
print("Sorry! The task", task['name'], "cannot be scheduled completely. You will have to complete the task in",
task['duration']-rem_time, "hours instead of", task['duration'], "hours!")
break
idx = 0
while idx < len(free_time[day]):
if rem_time == 0:
# # Remove that task
# new_tasks.remove(task)
# Go to next task
break
start_time = free_time[day][idx]
end_time = free_time[day][idx + 1]
# Difference between two consecutive datetime objects (in seconds)
avail = end_time - start_time
time_avail = avail.seconds/3600
# If the number of available hours for this window is more than the window_size
# if diff.hours >= window_size:
if time_avail >= window_size:
# If the amount of time available is greater than equal to remaining needed amount of time
# if diff.hours >= rem_time:
if time_avail >= rem_time:
# Add the duration of remaining time to the free_time
# end_time (date) should be start_time (date) + rem_time (float)
end_hours = start_time.hour + int(rem_time)
extra_minutes = int((rem_time - int(rem_time))*60)
end_minutes = start_time.minute + extra_minutes
if end_minutes >= 60:
end_minutes -= 60
end_hours += 1
end_time = datetime(start_time.year, start_time.month, start_time.day, end_hours, end_minutes)
rem_time = 0
# If the amount of time available is less than the total time required
else:
rem_time -= time_avail
# print("Remtime",rem_time)
# print("time_avail",time_avail)
# print(type)
pos = idx + 1
free_time[day].insert(pos, start_time)
free_time[day].insert(pos + 1, end_time)
schedule[day].append([start_time, end_time, task['name']])
# pprint("In while")
idx += 2
# # Update tasks to the new_tasks (i.e. removing those which have been scheduled successfully)
# db_scripts.db_update(db_name, collection_name, _student_record['uid'], 'tasks', new_tasks, username, password)
pprint(schedule)
if schedule:
pass
db_scripts.db_update(db_name, collection_name, _student_record['uid'], 'schedule', schedule, username, password)
return schedule
# Suggestion: if we reach the deadline and the task is not getting completed, we can try scheduling again
# by reducing the buffer to 15 mins/0 mins (this is an optimization and can be ignored for now)
# print("Entered")
# print("In core_scheduler")
#
# print("ARGUMENTS -", sys.argv[1])
# mlab DB details (from serialized object)
# pkl_file = open('.cred.pkl', 'rb')
# data = pickle.load(pkl_file)
# print("check 1")
# db_name = data['db_name']
# collection_name = data['collection_name']
# username = data['username']
# password = data['password']
db_name = 'se'
collection_name = 'student'
username = 'rtrgntsg'
password = 'menzies'
# print("check 2")
unityId = sys.argv[1]
# print("check 3")
day_date = ast.literal_eval(sys.argv[2])
buffer_time = int(sys.argv[3])
# print("abcd")
# unityId is the only parameter on which we query right now. Can be modified to have other parameters as well.
student_record = db_scripts.db_retrieve(db_name, collection_name, unityId, username, password)
print(student_record)
schedule = generate_schedule(unityId, day_date, student_record, buffer_time)
# print("Success!")
print(schedule)
sys.stdout.flush()
# # Details about temporary entries
#
# # unityId = 'rgchanda'
# # slackId = 'U912NK72P'
# # email = 'rgchanda@ncsu.edu'
# # name = 'Rohan Chandavarkar'
#
# # unityId = 'rtnaik'
# # slackId = 'U921S9WF8'
# # email = 'rtnaik@ncsu.edu'
# # name = 'Rohit Tushar Naik'
#
# unityId = 'sgshetty'
# slackId = 'U90JUGPU1'
# email = 'sgshetty@ncsu.edu'
# name = 'Sainag Ganesh Shetty'
#
# # dummy day_date variable for testing (till input received from bot)
# day_date = {
# '1': '2018-03-05 20:30:00',
# '2': '2018-03-06 20:30:00',
# '3': '2018-03-07 20:30:00',
# '4': '2018-03-08 20:30:00',
# '5': '2018-03-09 20:30:00',
# '6': '2018-03-10 20:30:00',
# '7': '2018-03-11 20:30:00'
# }
#
# # Assumed to be in minutes (logically)
# buffer_time = 15
| 34.414179
| 117
| 0.709856
|
58d03dae38b72858edd038fe7ee3c68f4d1e526a
| 1,873
|
py
|
Python
|
ardos/dc/DCFile.py
|
Ardos-Project/ardos.libpython
|
5837fc2e0ac5687c6787b5ea3d336c7afa4a9ec0
|
[
"BSD-3-Clause"
] | null | null | null |
ardos/dc/DCFile.py
|
Ardos-Project/ardos.libpython
|
5837fc2e0ac5687c6787b5ea3d336c7afa4a9ec0
|
[
"BSD-3-Clause"
] | null | null | null |
ardos/dc/DCFile.py
|
Ardos-Project/ardos.libpython
|
5837fc2e0ac5687c6787b5ea3d336c7afa4a9ec0
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import json
class DCFile:
def __init__(self, parent, path):
self.parent = parent
self.path = path
self.name = ''
self.jsonData = None
self.sortedClasses = []
self.loadDCFile()
def loadDCFile(self):
self.name, fileExtension = os.path.splitext(self.path)
try:
with open(self.path, 'r') as dcFile:
self.jsonData = json.load(dcFile)
except:
print("Error: Could not open DCFile '%s' at path '%s'" % (self.name, self.path))
raise
self.parseDCFile()
def parseDCFile(self):
self.loadTypeDefs()
self.loadStructs()
self.loadDistributedObjects()
def loadTypeDefs(self):
if 'typedefs' not in self.jsonData:
# No typedefs were defined in this dc file.
return
for typedef in self.jsonData["typedefs"]:
# For now, just push it straight into our typedef table.
self.parent.addTypeDef(typedef, self.jsonData["typedefs"][typedef])
def loadStructs(self):
if 'structs' not in self.jsonData:
# No structs were defined in this dc file.
return
for struct in self.jsonData["structs"]:
print("struct: %s" % struct)
def loadDistributedObjects(self):
if 'classes' not in self.jsonData:
# No classes were defined in this dc file.
return
for dclass in self.jsonData["classes"]:
# Add the dclass as a known dclass.
self.parent.dclasses.add(dclass)
# We have to order the classes first before assigning them ID's.
self.sortedClasses.append(dclass)
# Sort the classes.
self.sortedClasses.sort()
# Generate each DClass in sorted order.
for dclass in self.sortedClasses:
self.parent.addDClass(dclass, self.jsonData["classes"][dclass])
# Load inherited methods. Can only be done once each class has been generated.
# This is done by order of definition in the DClass.
for dclass in self.jsonData["classes"]:
self.parent.dclassesByName[dclass].loadInheritedMethods()
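# A minimal illustration of why the classes are sorted before registration (hedged comment,
# not part of the original file): generating DClasses in sorted-name order presumably gives
# every consumer of the same dc file an identical name -> ID mapping, e.g.
#   sorted(['Zone', 'Avatar', 'Door']) -> ['Avatar', 'Door', 'Zone'] -> IDs 0, 1, 2
# regardless of the order the classes appear in the JSON.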
| 26.380282
| 83
| 0.709557
|
2ab77ce09bf16fb027856e3d7ebf9c173de9f32d
| 938
|
py
|
Python
|
test2.py
|
heroescovid/dashboard
|
94c33f8aebde225c7ebc6d33c54b5d6856fdca60
|
[
"Apache-2.0"
] | null | null | null |
test2.py
|
heroescovid/dashboard
|
94c33f8aebde225c7ebc6d33c54b5d6856fdca60
|
[
"Apache-2.0"
] | null | null | null |
test2.py
|
heroescovid/dashboard
|
94c33f8aebde225c7ebc6d33c54b5d6856fdca60
|
[
"Apache-2.0"
] | null | null | null |
import json
import base64
#Encoding and decoding data is possible via the base64 library, so we use this approach to update values
def convert(data):
return json.dumps(data)
def deconvert(data):
return json.loads(data)
mydict = {"name": "Aman Singh", "Age":18}
# mydict = ["Aman", "singh", 18]
def base_encode(message):
message_bytes = message.encode('ascii')
base64_bytes = base64.b64encode(message_bytes)
base64_message = base64_bytes.decode('ascii')
return base64_message
def base_decode(base64_message):
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
return message
data = convert(mydict)
print(data)
encoded_data = base_encode(data)
print(encoded_data)
new_data = base_decode(encoded_data)
print(new_data)
new_dict = deconvert(new_data)
print(new_dict["name"])
print(type(mydict) is dict)
| 22.878049
| 110
| 0.742004
|
f0cb157af99d7043890f58db5c72bfc34ffdbbb9
| 1,970
|
py
|
Python
|
app/sort.py
|
daisysanson/Game-Marketplace
|
b42c17300689cd5f47505c44a30f5d875799fe5b
|
[
"MIT"
] | null | null | null |
app/sort.py
|
daisysanson/Game-Marketplace
|
b42c17300689cd5f47505c44a30f5d875799fe5b
|
[
"MIT"
] | 4
|
2021-05-17T19:27:21.000Z
|
2021-05-18T12:51:00.000Z
|
app/sort.py
|
daisysanson/Game-Marketplace
|
b42c17300689cd5f47505c44a30f5d875799fe5b
|
[
"MIT"
] | null | null | null |
from app import app, db
from app.models import Games
'''
Using a quicksort to sort the ratings of each game from highest to lowest.
Obviously this could be done in a much simpler way, but for the purpose of this assignment
I am demonstrating how such an algorithm can be used to sort data in an array.
Quicksort divides and conquers recursively: it picks a value known as the 'pivot' and
splits the array into smaller sub-arrays holding the values above and below the pivot,
then sorts each sub-array the same way.
Quicksort
list_of_games (array): the array that is to be sorted
low (int): smallest index of the array
high (int): the last index of the list_of_games array
In Big O notation the algorithm has mediocre worst-case efficiency: it degrades to O(n^2)
when the pivot choices are poor, although the average case is O(n log n).
A small usage sketch appears at the end of this file.
'''
def add_games_to_array(games):
list_of_games = []
for game in games:
list_of_games.append(game)
quickSort(list_of_games,0, len(list_of_games) -1 )
#passing in our original array, plus the smallest and last index of the array
return list_of_games
def partition(array, low, high):
i = (low-1)
#i tracks the index of the last element found to be >= the pivot (starts just before low)
pivot = array[high].rating
#value at which larger array is split into 2 smaller arrays of higher and lower numbers
for j in range(low, high):
#sorting in descending order
if array[j].rating >= pivot:
i = i+1
array[i], array[j] = array[j], array[i]
array[i+1], array[high] = array[high], array[i+1]
#swapping the pivot into its final position between the higher and lower sub-arrays
return (i+1)
def quickSort(array, low, high):
if len(array) == 1:
return array
if low < high:
pi = partition(array, low, high)
#recursively passing the smaller sub-arrays back through the function
quickSort(array, low, pi-1)
quickSort(array, pi+1, high)
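# A minimal usage sketch with in-memory stand-ins for Games rows (illustrative only, not part
# of the app): quickSort orders the array in place by .rating, highest first.
if __name__ == '__main__':
    from collections import namedtuple
    FakeGame = namedtuple('FakeGame', 'name rating')
    demo = [FakeGame('a', 3), FakeGame('b', 7), FakeGame('c', 5), FakeGame('d', 6)]
    quickSort(demo, 0, len(demo) - 1)
    print([g.rating for g in demo])  # -> [7, 6, 5, 3]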
| 34.561404
| 116
| 0.685787
|
0e53071882e6e9ad06864407edbec125cd205d87
| 20,557
|
py
|
Python
|
tf_agents/policies/tf_policy.py
|
niklasnolte/agents
|
065b801adf4d6be7beed64f3b07397bca3c741d2
|
[
"Apache-2.0"
] | 2
|
2021-02-02T06:56:58.000Z
|
2021-04-21T08:39:45.000Z
|
tf_agents/policies/tf_policy.py
|
MarioBonse/agents
|
c727141f67051b86d2564c4bd5fbc080623bfe19
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/policies/tf_policy.py
|
MarioBonse/agents
|
c727141f67051b86d2564c4bd5fbc080623bfe19
|
[
"Apache-2.0"
] | 6
|
2020-10-09T06:33:23.000Z
|
2022-02-03T16:16:36.000Z
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Policies API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.distributions import reparameterized_sampling
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.utils import nest_utils
tfd = tfp.distributions
class Base(tf.Module):
"""Abstract base class for TF Policies.
Example of simple use in TF:
tf_env = SomeTFEnvironment()
policy = SomeTFPolicy()
time_step, step_state, reset_env = tf_env.reset()
policy_state = policy.get_initial_state(batch_size=tf_env.batch_size)
action_step = policy.action(time_step, policy_state)
next_time_step, _ = env.step(action_step.action, step_state)
sess.run([time_step, action, next_time_step])
Example of using the same policy for several steps:
tf_env = SomeTFEnvironment()
policy = SomeTFPolicy()
exp_policy = SomeTFPolicy()
update_policy = exp_policy.update(policy)
policy_state = exp_policy.get_initial_state(tf_env.batch_size)
time_step, step_state, _ = tf_env.reset()
action_step, policy_state, _ = exp_policy.action(time_step, policy_state)
next_time_step, step_state = env.step(action_step.action, step_state)
for j in range(num_episodes):
sess.run(update_policy)
for i in range(num_steps):
sess.run([time_step, action_step, next_time_step])
Example with multiple steps:
tf_env = SomeTFEnvironment()
policy = SomeTFPolicy()
# reset() creates the initial time_step and step_state, plus a reset_op
time_step, step_state, reset_op = tf_env.reset()
policy_state = policy.get_initial_state(tf_env.batch_size)
n_step = [time_step]
for i in range(n):
action_step = policy.action(time_step, policy_state)
policy_state = action_step.state
n_step.append(action_step)
time_step, step_state = tf_env.step(action_step.action, step_state)
n_step.append(time_step)
# n_step contains [time_step, action, time_step, action, ...]
sess.run(n_step)
Example with explicit resets:
tf_env = SomeTFEnvironment()
policy = SomeTFPolicy()
policy_state = policy.get_initial_state(tf_env.batch_size)
time_step, step_state, reset_env = tf_env.reset()
action_step = policy.action(time_step, policy_state)
# It applies the action and returns the new TimeStep.
next_time_step, _ = tf_env.step(action_step.action, step_state)
next_action_step = policy.action(next_time_step, policy_state)
# The Environment and the Policy would be reset before starting.
sess.run([time_step, action_step, next_time_step, next_action_step])
# Will force reset the Environment and the Policy.
sess.run([reset_env])
sess.run([time_step, action_step, next_time_step, next_action_step])
"""
# TODO(b/127327645) Remove this attribute.
# This attribute allows subclasses to back out of automatic tf.function
# attribute inside TF1 (for autodeps).
_enable_functions = True
def __init__(self,
time_step_spec,
action_spec,
policy_state_spec=(),
info_spec=(),
clip=True,
emit_log_probability=False,
automatic_state_reset=True,
observation_and_action_constraint_splitter=None,
name=None):
"""Initialization of Base class.
Args:
time_step_spec: A `TimeStep` spec of the expected time_steps. Usually
provided by the user to the subclass.
action_spec: A nest of BoundedTensorSpec representing the actions. Usually
provided by the user to the subclass.
policy_state_spec: A nest of TensorSpec representing the policy_state.
Provided by the subclass, not directly by the user.
info_spec: A nest of TensorSpec representing the policy info. Provided by
the subclass, not directly by the user.
clip: Whether to clip actions to spec before returning them. Default
True. Most policy-based algorithms (PCL, PPO, REINFORCE) use unclipped
continuous actions for training.
emit_log_probability: Emit log-probabilities of actions, if supported. If
True, policy_step.info will have CommonFields.LOG_PROBABILITY set.
Please consult utility methods provided in policy_step for setting and
retrieving these. When working with custom policies, either provide a
dictionary info_spec or a namedtuple with the field 'log_probability'.
automatic_state_reset: If `True`, then `get_initial_policy_state` is used
to clear state in `action()` and `distribution()` for time steps
where `time_step.is_first()`.
observation_and_action_constraint_splitter: A function used to process
observations with action constraints. These constraints can indicate,
for example, a mask of valid/invalid actions for a given state of the
environment. The function takes in a full observation and returns a
tuple consisting of 1) the part of the observation intended as input to
the network and 2) the constraint. An example
`observation_and_action_constraint_splitter` could be as simple as: ```
def observation_and_action_constraint_splitter(observation): return
observation['network_input'], observation['constraint'] ```
*Note*: when using `observation_and_action_constraint_splitter`, make
sure the provided `q_network` is compatible with the network-specific
half of the output of the
`observation_and_action_constraint_splitter`. In particular,
`observation_and_action_constraint_splitter` will be called on the
observation before passing to the network. If
`observation_and_action_constraint_splitter` is None, action
constraints are not applied.
name: A name for this module. Defaults to the class name.
"""
super(Base, self).__init__(name=name)
common.check_tf1_allowed()
common.tf_agents_gauge.get_cell('TFAPolicy').set(True)
common.assert_members_are_not_overridden(base_cls=Base, instance=self)
if not isinstance(time_step_spec, ts.TimeStep):
raise ValueError(
'The `time_step_spec` must be an instance of `TimeStep`, but is `{}`.'
.format(type(time_step_spec)))
self._time_step_spec = time_step_spec
self._action_spec = action_spec
self._policy_state_spec = policy_state_spec
self._emit_log_probability = emit_log_probability
if emit_log_probability:
log_probability_spec = tensor_spec.BoundedTensorSpec(
shape=(),
dtype=tf.float32,
maximum=0,
minimum=-float('inf'),
name='log_probability')
log_probability_spec = tf.nest.map_structure(
lambda _: log_probability_spec, action_spec)
info_spec = policy_step.set_log_probability(info_spec,
log_probability_spec)
self._info_spec = info_spec
self._setup_specs()
self._clip = clip
self._action_fn = common.function_in_tf1()(self._action)
self._automatic_state_reset = automatic_state_reset
self._observation_and_action_constraint_splitter = (
observation_and_action_constraint_splitter)
def _setup_specs(self):
self._policy_step_spec = policy_step.PolicyStep(
action=self._action_spec,
state=self._policy_state_spec,
info=self._info_spec)
self._trajectory_spec = trajectory.from_transition(self._time_step_spec,
self._policy_step_spec,
self._time_step_spec)
def variables(self):
"""Returns the list of Variables that belong to the policy."""
return self._variables()
@property
def observation_and_action_constraint_splitter(self):
return self._observation_and_action_constraint_splitter
def get_initial_state(self, batch_size):
"""Returns an initial state usable by the policy.
Args:
batch_size: Tensor or constant: size of the batch dimension. Can be None
in which case no batch dimension is added.
Returns:
A nested object of type `policy_state` containing properly
initialized Tensors.
"""
return self._get_initial_state(batch_size)
def _maybe_reset_state(self, time_step, policy_state):
if policy_state is (): # pylint: disable=literal-comparison
return policy_state
batch_size = tf.compat.dimension_value(time_step.discount.shape[0])
if batch_size is None:
batch_size = tf.shape(time_step.discount)[0]
# Make sure we call this with a kwarg as it may be wrapped in tf.function
# which would expect a tensor if it was not a kwarg.
zero_state = self.get_initial_state(batch_size=batch_size)
condition = time_step.is_first()
# When experience is a sequence we only reset automatically for the first
# time_step in the sequence as we can't easily generalize how the policy is
# unrolled over the sequence.
if nest_utils.get_outer_rank(time_step, self._time_step_spec) > 1:
condition = time_step.is_first()[:, 0, ...]
return nest_utils.where(condition, zero_state, policy_state)
def action(self, time_step, policy_state=(), seed=None):
"""Generates next action given the time_step and policy_state.
Args:
time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
policy_state: A Tensor, or a nested dict, list or tuple of Tensors
representing the previous policy_state.
seed: Seed to use if action performs sampling (optional).
Returns:
A `PolicyStep` named tuple containing:
`action`: An action Tensor matching the `action_spec()`.
`state`: A policy state tensor to be fed into the next call to action.
`info`: Optional side information such as action log probabilities.
Raises:
RuntimeError: If subclass __init__ didn't call super().__init__.
"""
if self._enable_functions and getattr(self, '_action_fn', None) is None:
raise RuntimeError(
'Cannot find _action_fn. Did %s.__init__ call super?' %
type(self).__name__)
if self._enable_functions:
action_fn = self._action_fn
else:
action_fn = self._action
tf.nest.assert_same_structure(time_step, self._time_step_spec)
tf.nest.assert_same_structure(policy_state, self._policy_state_spec)
if self._automatic_state_reset:
policy_state = self._maybe_reset_state(time_step, policy_state)
step = action_fn(time_step=time_step, policy_state=policy_state, seed=seed)
def clip_action(action, action_spec):
if isinstance(action_spec, tensor_spec.BoundedTensorSpec):
return common.clip_to_spec(action, action_spec)
return action
if self._clip:
clipped_actions = tf.nest.map_structure(clip_action, step.action,
self._action_spec)
step = step._replace(action=clipped_actions)
tf.nest.assert_same_structure(step, self._policy_step_spec)
def compare_to_spec(value, spec):
return value.dtype.is_compatible_with(spec.dtype)
compatibility = tf.nest.flatten(
tf.nest.map_structure(compare_to_spec, step.action, self.action_spec))
if not all(compatibility):
get_dtype = lambda x: x.dtype
action_dtypes = tf.nest.map_structure(get_dtype, step.action)
spec_dtypes = tf.nest.map_structure(get_dtype, self.action_spec)
raise TypeError('Policy produced an action with a dtype that doesn\'t '
'match its action_spec. Got action: %s with '
'action_spec: %s' % (action_dtypes, spec_dtypes))
return step
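# Illustrative usage sketch (editor's addition; `SomeTFEnvironment` and
# `SomePolicySubclass` are hypothetical names). Thread the returned `state`
# back into the next call so stateful (e.g. RNN) policies behave correctly:
#
#   tf_env = SomeTFEnvironment()
#   policy = SomePolicySubclass(tf_env.time_step_spec(), tf_env.action_spec())
#   policy_state = policy.get_initial_state(tf_env.batch_size)
#   time_step = tf_env.current_time_step()
#   action_step = policy.action(time_step, policy_state, seed=123)
#   time_step = tf_env.step(action_step.action)
#   policy_state = action_step.state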
def distribution(self, time_step, policy_state=()):
"""Generates the distribution over next actions given the time_step.
Args:
time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
policy_state: A Tensor, or a nested dict, list or tuple of Tensors
representing the previous policy_state.
Returns:
A `PolicyStep` named tuple containing:
`action`: A tf.distribution capturing the distribution of next actions.
`state`: A policy state tensor for the next call to distribution.
`info`: Optional side information such as action log probabilities.
"""
tf.nest.assert_same_structure(time_step, self._time_step_spec)
tf.nest.assert_same_structure(policy_state, self._policy_state_spec)
if self._automatic_state_reset:
policy_state = self._maybe_reset_state(time_step, policy_state)
step = self._distribution(time_step=time_step, policy_state=policy_state)
if self.emit_log_probability:
# This here is set only for compatibility with info_spec in constructor.
info = policy_step.set_log_probability(
step.info,
tf.nest.map_structure(
lambda _: tf.constant(0., dtype=tf.float32),
policy_step.get_log_probability(self._info_spec)))
step = step._replace(info=info)
tf.nest.assert_same_structure(step, self._policy_step_spec)
return step
def update(self,
policy,
tau=1.0,
tau_non_trainable=None,
sort_variables_by_name=False):
"""Update the current policy with another policy.
This would include copying the variables from the other policy.
Args:
policy: Another policy to update this one from.
tau: A float scalar in [0, 1]. When tau is 1.0 (the default), we do a hard
update. This is used for trainable variables.
tau_non_trainable: A float scalar in [0, 1] for non_trainable variables.
If None, will copy from tau.
sort_variables_by_name: A bool, when True would sort the variables by name
before doing the update.
Returns:
A TF op that performs the update.
"""
if self.variables():
return common.soft_variables_update(
policy.variables(),
self.variables(),
tau=tau,
tau_non_trainable=tau_non_trainable,
sort_variables_by_name=sort_variables_by_name)
else:
return tf.no_op()
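# Illustrative sketch (editor's addition; `online_policy` and `target_policy`
# are hypothetical instances of the same subclass with matching variables).
# A soft (Polyak) update blends the variables, while the default tau=1.0
# copies them outright:
#
#   soft_update_op = target_policy.update(online_policy, tau=0.005)
#   hard_update_op = target_policy.update(online_policy)  # tau defaults to 1.0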
@property
def emit_log_probability(self):
"""Whether this policy instance emits log probabilities or not."""
return self._emit_log_probability
@property
def time_step_spec(self):
"""Describes the `TimeStep` tensors returned by `step()`.
Returns:
A `TimeStep` namedtuple with `TensorSpec` objects instead of Tensors,
which describe the shape, dtype and name of each tensor returned by
`step()`.
"""
return self._time_step_spec
@property
def action_spec(self):
"""Describes the TensorSpecs of the Tensors expected by `step(action)`.
`action` can be a single Tensor, or a nested dict, list or tuple of
Tensors.
Returns:
A single BoundedTensorSpec, or a nested dict, list or tuple of
`BoundedTensorSpec` objects, which describe the shape and
dtype of each Tensor expected by `step()`.
"""
return self._action_spec
@property
def policy_state_spec(self):
"""Describes the Tensors expected by `step(_, policy_state)`.
`policy_state` can be an empty tuple, a single Tensor, or a nested dict,
list or tuple of Tensors.
Returns:
A single TensorSpec, or a nested dict, list or tuple of
`TensorSpec` objects, which describe the shape and
dtype of each Tensor expected by `step(_, policy_state)`.
"""
return self._policy_state_spec
@property
def info_spec(self):
"""Describes the Tensors emitted as info by `action` and `distribution`.
`info` can be an empty tuple, a single Tensor, or a nested dict,
list or tuple of Tensors.
Returns:
A single TensorSpec, or a nested dict, list or tuple of
`TensorSpec` objects, which describe the shape and
dtype of each Tensor emitted as `info` by `action()` and `distribution()`.
"""
return self._info_spec
@property
def policy_step_spec(self):
"""Describes the output of `action()`.
Returns:
A nest of TensorSpec which describe the shape and dtype of each Tensor
emitted by `action()`.
"""
return self._policy_step_spec
# TODO(kbanoop, ebrevdo): Should this be collect_data_spec to mirror agents?
@property
def trajectory_spec(self):
"""Describes the Tensors written when using this policy with an environment.
Returns:
A `Trajectory` containing all tensor specs associated with the
observation_spec, action_spec, policy_state_spec, and info_spec of
this policy.
"""
return self._trajectory_spec
# Subclasses MAY optionally override _action.
def _action(self, time_step, policy_state, seed):
"""Implementation of `action`.
Args:
time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
policy_state: A Tensor, or a nested dict, list or tuple of Tensors
representing the previous policy_state.
seed: Seed to use if action performs sampling (optional).
Returns:
A `PolicyStep` named tuple containing:
`action`: An action Tensor matching the `action_spec()`.
`state`: A policy state tensor to be fed into the next call to action.
`info`: Optional side information such as action log probabilities.
"""
seed_stream = tfp.util.SeedStream(seed=seed, salt='ppo_policy')
distribution_step = self._distribution(time_step, policy_state)
actions = tf.nest.map_structure(
lambda d: reparameterized_sampling.sample(d, seed=seed_stream()),
distribution_step.action)
info = distribution_step.info
if self.emit_log_probability:
try:
log_probability = tf.nest.map_structure(lambda a, d: d.log_prob(a),
actions,
distribution_step.action)
info = policy_step.set_log_probability(info, log_probability)
except Exception:  # Any failure here means the distribution cannot report log_prob.
raise TypeError('%s does not support emitting log-probabilities.' %
type(self).__name__)
return distribution_step._replace(action=actions, info=info)
## Subclasses MUST implement these.
@abc.abstractmethod
def _distribution(self, time_step, policy_state):
"""Implementation of `distribution`.
Args:
time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
policy_state: A Tensor, or a nested dict, list or tuple of Tensors
representing the previous policy_state.
Returns:
A `PolicyStep` named tuple containing:
`action`: An (optionally nested) `tfp.distributions.Distribution`
capturing the distribution of next actions.
`state`: A policy state tensor for the next call to distribution.
`info`: Optional side information such as action log probabilities.
"""
pass
@abc.abstractmethod
def _variables(self):
"""Returns an iterable of `tf.Variable` objects used by this policy."""
pass
# Subclasses MAY optionally overwrite _get_initial_state.
def _get_initial_state(self, batch_size):
"""Returns the initial state of the policy network.
Args:
batch_size: A constant or Tensor holding the batch size. Can be None, in
which case the state will not have a batch dimension added.
Returns:
A nest of zero tensors matching the spec of the policy network state.
"""
return tensor_spec.zero_spec_nest(
self._policy_state_spec,
outer_dims=None if batch_size is None else [batch_size])
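# Illustrative sketch (editor's addition; `FixedPolicy` is hypothetical). A
# minimal concrete subclass only has to provide `_distribution` and
# `_variables`; clipping, automatic state reset and log-probability emission
# are handled by `Base`:
#
# class FixedPolicy(Base):
#   """Stateless policy that always emits a deterministic zero action."""
#
#   def _variables(self):
#     return []
#
#   def _distribution(self, time_step, policy_state):
#     zero_action = tf.nest.map_structure(
#         lambda spec: tfp.distributions.Deterministic(
#             loc=tf.zeros(spec.shape, dtype=spec.dtype)),
#         self._action_spec)
#     return policy_step.PolicyStep(zero_action, policy_state, ())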
| 39.15619
| 80
| 0.701124
|
70736449f7309d924a52f5c53e918eeca14ed341
| 787
|
py
|
Python
|
vendors/migrations/0003_auto_20170116_1847.py
|
wedwardbeck/vn1
|
7eff41d40003cb9ca20281eb4c9ce71f129481ba
|
[
"MIT"
] | null | null | null |
vendors/migrations/0003_auto_20170116_1847.py
|
wedwardbeck/vn1
|
7eff41d40003cb9ca20281eb4c9ce71f129481ba
|
[
"MIT"
] | null | null | null |
vendors/migrations/0003_auto_20170116_1847.py
|
wedwardbeck/vn1
|
7eff41d40003cb9ca20281eb4c9ce71f129481ba
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-17 02:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('vendors', '0002_auto_20170115_2126'),
]
operations = [
migrations.AlterField(
model_name='vendor',
name='added_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='vendor',
name='added_dt',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| 27.137931
| 110
| 0.653113
|
a69135b273bbb83a9ae33d55bf00752912099472
| 7,509
|
py
|
Python
|
package/scripts/common.py
|
ZZZKROSS/ambari-hue-service
|
e25484168c1adb503376d9e087436352f3d2a3e3
|
[
"Apache-2.0"
] | 1
|
2018-11-08T13:08:43.000Z
|
2018-11-08T13:08:43.000Z
|
package/scripts/common.py
|
ZZZKROSS/ambari-hue-service
|
e25484168c1adb503376d9e087436352f3d2a3e3
|
[
"Apache-2.0"
] | null | null | null |
package/scripts/common.py
|
ZZZKROSS/ambari-hue-service
|
e25484168c1adb503376d9e087436352f3d2a3e3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys, os, pwd, grp, signal, time
from resource_management import *
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, Directory, File
from resource_management.core.shell import call
from resource_management.core.system import System
from resource_management.libraries.functions.default import default
def setup_user():
"""
Creates Hue user home directory and sets up the correct ownership.
"""
__create_hue_user()
__set_home_dir_ownership()
def __create_hue_user():
import params
try:
grp.getgrnam(params.hue_group)
except KeyError:
Logger.info(format("Creating group '{params.hue_group}' for Hue Service"))
Group(
group_name = params.hue_group,
ignore_failures = True
)
try:
pwd.getpwnam(params.hue_user)
except KeyError:
Logger.info(format("Creating user '{params.hue_user}' for Hue Service"))
User(
username = params.hue_user,
groups = [params.hue_group],
ignore_failures = True
)
def __set_home_dir_ownership():
"""
Updates the Hue user home directory to be owned by hue:hue.
"""
import params
if not os.path.exists("/home/{0}".format(params.hue_user)):
Directory(params.hue_local_home_dir,
mode=0700,
cd_access='a',
owner=params.hue_user,
group=params.hue_group,
create_parents=True
)
def download_hue():
"""
Download Hue to the installation directory.
"""
import params
Execute('{0} | xargs wget -O hue.tgz'.format(params.download_url))
Execute('tar -zxvf hue.tgz -C {0} && rm -f hue.tgz'.format(params.hue_install_dir))
Execute('mkdir -p {0}'.format(params.hue_dir))
Execute('mkdir -p /usr/local/hue/desktop/conf')
# Ensure all Hue files owned by hue
Execute('chown -R {0}:{1} {2}'.format(params.hue_user,params.hue_group,params.hue_dir))
Execute('ln -s {0} /usr/hdp/current/hue-server'.format(params.hue_dir))
Execute('rm -rf {0}/*'.format(params.hue_dir))
Execute('cd /usr/local/hue-3.8.1 && make install')
#Execute('ln -s /usr/local/hue-3.8.1 /usr/local/hue)
Logger.info("Hue Service is installed")
def add_hdfs_configuration(if_ranger=False, security_enabled=False):
import params
services_configurations = {}
services_configurations['core-site'] = {}
services_configurations['core-site']['hadoop.proxyuser.hue.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.hue.hosts'] = '*'
services_configurations['hdfs-site'] = {}
services_configurations['hdfs-site']['dfs.namenode.acls.enabled'] = 'true'
if params.hue_hbase_module_enabled == 'Yes':
services_configurations['core-site']['hadoop.proxyuser.hbase.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.hbase.hosts'] = '*'
if params.hue_hive_module_enabled == 'Yes':
services_configurations['core-site']['hadoop.proxyuser.hive.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.hive.hosts'] = '*'
if params.hue_spark_module_enabled == 'Yes':
services_configurations['core-site']['hadoop.proxyuser.spark.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.spark.hosts'] = '*'
if params.hue_oozie_module_enabled == 'Yes':
services_configurations['core-site']['hadoop.proxyuser.oozie.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.oozie.hosts'] = '*'
if params.dfs_ha_enabled:
services_configurations['core-site']['hadoop.proxyuser.httpfs.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.httpfs.hosts'] = '*'
# services_configurations['httpfs-site'] = {}
# services_configurations['httpfs-site']['httpfs.proxyuser.hue.groups'] = '*'
# services_configurations['httpfs-site']['httpfs.proxyuser.hue.hosts'] = '*'
if security_enabled:
services_configurations['core-site']['hadoop.proxyuser.HTTP.groups'] = '*'
services_configurations['core-site']['hadoop.proxyuser.HTTP.hosts'] = '*'
services_configurations['core-site']['hue.kerberos.principal.shortname'] = 'hue'
add_configurations(services_configurations)
def add_hbase_configuration(if_ranger=False, security_enabled=False):
import params
services_configurations = {}
services_configurations['hbase-site'] = {}
if if_ranger:
services_configurations['hbase-site']['hbase.regionserver.thrift.http'] = 'true'
services_configurations['hbase-site']['hbase.thrift.support.proxyuser'] = 'true'
if security_enabled:
services_configurations['hbase-site']['hbase.thrift.security.qop'] = 'auth'
services_configurations['hbase-site']['hbase.thrift.support.proxyuser'] = 'true'
services_configurations['hbase-site']['hbase.regionserver.thrift.http'] = 'true'
services_configurations['hbase-site']['hbase.thrift.kerberos.principal'] = params.HTTP_principal
services_configurations['hbase-site']['hbase.thrift.keytab.file'] = params.HTTP_keytab
services_configurations['hbase-site']['hbase.rpc.engine'] = 'org.apache.hadoop.hbase.ipc.SecureRpcEngine'
add_configurations(services_configurations)
def add_hive_configuration(if_ranger=False, security_enabled=False):
services_configurations = {}
services_configurations['hive-site'] = {}
services_configurations['hive-site']['hive.security.authorization.sqlstd.confwhitelist.append'] = 'hive.server2.logging.operation.verbose'
services_configurations['webhcat-site'] = {}
services_configurations['webhcat-site']['webhcat.proxyuser.hue.groups'] = '*'
services_configurations['webhcat-site']['webhcat.proxyuser.hue.hosts'] = '*'
if if_ranger:
services_configurations['hive-site']['hive.server2.enable.impersonation'] = 'true'
add_configurations(services_configurations)
def add_oozie_configuration(if_ranger=False, security_enabled=False):
services_configurations = {}
services_configurations['oozie-site'] = {}
services_configurations['oozie-site']['oozie.service.ProxyUserService.proxyuser.hue.groups'] = '*'
services_configurations['oozie-site']['oozie.service.ProxyUserService.proxyuser.hue.hosts'] = '*'
add_configurations(services_configurations)
def add_spark_configuration(if_ranger=False, security_enabled=False):
services_configurations = {}
services_configurations['livy-conf'] = {}
services_configurations['livy-conf']['livy.server.csrf_protection.enabled'] = 'false'
add_configurations(services_configurations)
def add_configurations(services_configurations):
"""
Run the script file to add configurations
#/var/lib/ambari-server/resources/scripts/configs.sh set ambari-server-host \
cluster_name core-site "hadoop.proxyuser.hbase.hosts" "*"
services_configurations:{'configuration file1':{'key1':'value1','key2':'value2',...},
'configuration file2':{'key1':'value1','key2':'value2',...}
...}
"""
import params
if isinstance(services_configurations, dict):
for key1, value1 in services_configurations.items():
if isinstance(value1, dict):
for key2, value2 in value1.items():
cmd = format(params.service_packagedir + "/files/configs.sh set " + params.ambari_server_hostname + " " + params.cluster_name + " " + key1 + " '" + key2 + "' '" + value2 + "'")
Execute(cmd)
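# Illustrative sketch (editor's addition): the nested dict shape expected by
# add_configurations, mirroring the docstring above. Keys and values here are
# only examples:
#
#   add_configurations({
#       'core-site': {
#           'hadoop.proxyuser.hue.hosts': '*',
#           'hadoop.proxyuser.hue.groups': '*',
#       },
#       'hdfs-site': {
#           'dfs.namenode.acls.enabled': 'true',
#       },
#   })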
| 45.786585
| 185
| 0.721001
|
9f06648d63fa866851ae224017d16dfbb2f9e5e0
| 631
|
py
|
Python
|
backend/manage.py
|
crowdbotics-apps/app-1-30327
|
c2759a779e420e57de669c94b22c6a0794eff158
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/manage.py
|
crowdbotics-apps/app-1-30327
|
c2759a779e420e57de669c94b22c6a0794eff158
|
[
"FTL",
"AML",
"RSA-MD"
] | 8
|
2021-09-12T16:36:23.000Z
|
2022-01-09T15:35:42.000Z
|
backend/manage.py
|
crowdbotics-apps/app-1-30327
|
c2759a779e420e57de669c94b22c6a0794eff158
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app_1_30327.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.681818
| 75
| 0.684628
|
08e55839d8831f15989f14c1c4e84e70785de148
| 794
|
py
|
Python
|
y2014_bot3/control_loops/python/polydrivetrain.py
|
AustinSchuh/971-Robot-Code
|
99abc66fd2d899c0bdab338dc6f57dc5def9be8d
|
[
"Apache-2.0"
] | 39
|
2021-06-18T03:22:30.000Z
|
2022-03-21T15:23:43.000Z
|
y2014_bot3/control_loops/python/polydrivetrain.py
|
AustinSchuh/971-Robot-Code
|
99abc66fd2d899c0bdab338dc6f57dc5def9be8d
|
[
"Apache-2.0"
] | 10
|
2021-06-18T03:22:19.000Z
|
2022-03-18T22:14:15.000Z
|
y2014_bot3/control_loops/python/polydrivetrain.py
|
AustinSchuh/971-Robot-Code
|
99abc66fd2d899c0bdab338dc6f57dc5def9be8d
|
[
"Apache-2.0"
] | 4
|
2021-08-19T19:20:04.000Z
|
2022-03-08T07:33:18.000Z
|
#!/usr/bin/python3
import sys
from y2014_bot3.control_loops.python import drivetrain
from frc971.control_loops.python import polydrivetrain
import gflags
import glog
__author__ = 'Austin Schuh (austin.linux@gmail.com)'
FLAGS = gflags.FLAGS
try:
gflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')
except gflags.DuplicateFlagError:
pass
def main(argv):
if FLAGS.plot:
polydrivetrain.PlotPolyDrivetrainMotions(drivetrain.kDrivetrain)
elif len(argv) != 7:
glog.fatal('Expected .h file name and .cc file name')
else:
polydrivetrain.WritePolyDrivetrain(argv[1:3], argv[3:5], argv[5:7],
'y2014_bot3', drivetrain.kDrivetrain)
if __name__ == '__main__':
argv = FLAGS(sys.argv)
glog.init()
sys.exit(main(argv))
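# Illustrative invocations (editor's addition; the six generated file names are
# hypothetical). Either plot the loop response or write the generated sources:
#   python3 polydrivetrain.py --plot
#   python3 polydrivetrain.py polydrivetrain_motor_plant.h polydrivetrain_motor_plant.cc \
#       polydrivetrain_integral_motor_plant.h polydrivetrain_integral_motor_plant.cc \
#       hybrid_velocity_drivetrain.h hybrid_velocity_drivetrain.cc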
| 24.8125
| 76
| 0.707809
|
65f439c3507f2a78469ebba504f4aab7c9983cc8
| 1,429
|
py
|
Python
|
jobya/jobs/tests/test_resources.py
|
xblzbjs/Jobya
|
b936ce37da86bfe8326a532dab3887fae6c65e45
|
[
"MIT"
] | null | null | null |
jobya/jobs/tests/test_resources.py
|
xblzbjs/Jobya
|
b936ce37da86bfe8326a532dab3887fae6c65e45
|
[
"MIT"
] | 2
|
2022-02-08T01:15:52.000Z
|
2022-03-31T04:24:15.000Z
|
jobya/jobs/tests/test_resources.py
|
xblzbjs/Jobya
|
b936ce37da86bfe8326a532dab3887fae6c65e45
|
[
"MIT"
] | null | null | null |
import pytest
from ..resources import JobExportResource
from .factories import JobFactory
pytestmark = pytest.mark.django_db
class TestJobExportResource:
@classmethod
def setup_class(cls):
cls.resource = JobExportResource()
def test_fields(self):
expected_fields = [
"uuid",
"title",
"category",
"company",
"type",
"description",
"salary_min",
"salary_max",
"redirect_url",
"support_remote",
]
assert len(self.resource.fields) == len(expected_fields)
assert all([e in self.resource.fields for e in expected_fields])
def test_dehydrating_fields(self):
job = JobFactory()
salary_min = self.resource.export_field(self.resource.get_fields()[-4], job)
salary_max = self.resource.export_field(self.resource.get_fields()[-3], job)
assert salary_min == job.salary.lower
assert salary_max == job.salary.upper
def test_get_export_order(self):
expected_headers = [
"UUID",
"Title",
"Category",
"Company",
"Type",
"Description",
"Salary min",
"Salary max",
"Redirect url",
"Whether to support remote",
]
assert self.resource.get_export_headers() == expected_headers
| 25.981818
| 84
| 0.56753
|
2075ae97ff10db601a87564a79b25556c3041d2b
| 5,124
|
py
|
Python
|
messageticker_iot.py
|
cidoni/newsticker
|
afa7502ffa0bafc6a1fd7a281a4c9de972c0582a
|
[
"MIT"
] | null | null | null |
messageticker_iot.py
|
cidoni/newsticker
|
afa7502ffa0bafc6a1fd7a281a4c9de972c0582a
|
[
"MIT"
] | null | null | null |
messageticker_iot.py
|
cidoni/newsticker
|
afa7502ffa0bafc6a1fd7a281a4c9de972c0582a
|
[
"MIT"
] | null | null | null |
'''
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
'''
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import logging
import time
import argparse
import json
from messageticker import showOnLEDDisplay
AllowedActions = ['both', 'publish', 'subscribe']
# Custom MQTT message callback
def customCallback(client, userdata, message):
#print("Received a new message: ")
msg=message.payload.decode('utf-8')
print(msg)
showOnLEDDisplay(msg)
# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path")
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path")
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path")
parser.add_argument("-p", "--port", action="store", dest="port", type=int, help="Port number override")
parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False,
help="Use MQTT over WebSocket")
parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub",
help="Targeted client id")
parser.add_argument("-t", "--topic", action="store", dest="topic", default="sdk/test/Python", help="Targeted topic")
parser.add_argument("-m", "--mode", action="store", dest="mode", default="both",
help="Operation modes: %s"%str(AllowedActions))
parser.add_argument("-M", "--message", action="store", dest="message", default="Hello World!",
help="Message to publish")
args = parser.parse_args()
host = args.host
rootCAPath = args.rootCAPath
certificatePath = args.certificatePath
privateKeyPath = args.privateKeyPath
port = args.port
useWebsocket = args.useWebsocket
clientId = args.clientId
topic = args.topic
if args.mode not in AllowedActions:
parser.error("Unknown --mode option %s. Must be one of %s" % (args.mode, str(AllowedActions)))
exit(2)
if args.useWebsocket and args.certificatePath and args.privateKeyPath:
parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.")
exit(2)
if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath):
parser.error("Missing credentials for authentication.")
exit(2)
# Port defaults
if args.useWebsocket and not args.port: # When no port override for WebSocket, default to 443
port = 443
if not args.useWebsocket and not args.port: # When no port override for non-WebSocket, default to 8883
port = 8883
# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.ERROR)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Init AWSIoTMQTTClient
myAWSIoTMQTTClient = None
if useWebsocket:
myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True)
myAWSIoTMQTTClient.configureEndpoint(host, port)
myAWSIoTMQTTClient.configureCredentials(rootCAPath)
else:
myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)
myAWSIoTMQTTClient.configureEndpoint(host, port)
myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# AWSIoTMQTTClient connection configuration
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
myAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz
myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec
myAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec
# Connect and subscribe to AWS IoT
myAWSIoTMQTTClient.connect()
if args.mode == 'both' or args.mode == 'subscribe':
myAWSIoTMQTTClient.subscribe(topic, 1, customCallback)
time.sleep(2)
# Publish to the same topic in a loop forever
loopCount = 0
while True:
'''
if args.mode == 'both' or args.mode == 'publish':
message = {}
message['message'] = args.message
message['sequence'] = loopCount
messageJson = json.dumps(message)
myAWSIoTMQTTClient.publish(topic, messageJson, 1)
if args.mode == 'publish':
print('Published topic %s: %s\n' % (topic, messageJson))
loopCount += 1
'''
time.sleep(1)
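# Illustrative invocation (editor's addition; the endpoint, certificate paths
# and client id are hypothetical). Subscribes to the topic and shows each
# received message on the LED display via customCallback:
#   python messageticker_iot.py \
#       -e abc123-ats.iot.us-east-1.amazonaws.com \
#       -r root-CA.crt -c device.pem.crt -k private.pem.key \
#       -id newsticker01 -t sdk/test/Python -m subscribe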
| 39.72093
| 120
| 0.728532
|
423d346fc968b525009a59883d7b13012c4eb290
| 37,096
|
py
|
Python
|
printer.py
|
vishalmohanty/webscraper
|
40e801008ac10249682e4115057e993aa4afda61
|
[
"MIT"
] | null | null | null |
printer.py
|
vishalmohanty/webscraper
|
40e801008ac10249682e4115057e993aa4afda61
|
[
"MIT"
] | 5
|
2022-02-12T18:37:59.000Z
|
2022-03-06T02:59:47.000Z
|
printer.py
|
vishalmohanty/webscraper
|
40e801008ac10249682e4115057e993aa4afda61
|
[
"MIT"
] | null | null | null |
import csv
import math
import matplotlib.lines as mlines
from matplotlib.ticker import MaxNLocator
import matplotlib.pylab as plt
import numpy as np
from matplotlib import rcParams
rcParams['font.family'] = 'Times New Roman'
weighted = {
'Montgomery, AL': -0.03918335016516895,
'Juneau, AK': -0.04819714984707875,
'Phoenix, AZ': -0.04127845221123429,
'Little Rock, AR': -0.043208189240339485,
'Sacramento, CA': -0.0447932962175611,
'Denver, CO': -0.03534568725038026,
'Hartford, CT': -0.051318345269589304,
'Dover, DE': -0.04410079817485979,
'Tallahassee, FL': -0.04904944079404738,
'Atlanta, GA': -0.03835455112072264,
'Honolulu, HI': -0.04430356055903157,
'Boise, ID': -0.04429277255559287,
'Springfield, IL': -0.049079223899573546,
'Indianapolis, IN': -0.04045169826106026,
'Des Moines, IA': -0.04411556134405065,
'Topeka, KS': -0.05086811538147495,
'Frankfort, KY': -0.042184825476727035,
'Baton Rouge, LA': -0.04389737416132434,
'Augusta, ME': -0.04212128299879622,
'Annapolis, MD': -0.04388988333076714,
'Boston, MA': -0.0383121150700335,
'Lansing, MI': -0.04572841478868638,
'St. Paul, MN': -0.04296604818535782,
'Jackson, MS': -0.04470627813704356,
'Jefferson City, MO': -0.03657775051265361,
'Helena, MT': -0.04413495825518135,
'Lincoln, NE': -0.03617707893524351,
'Carson City, NV': -0.03583424627844964,
'Concord, NH': -0.04914145723628957,
'Trenton, NJ': -0.041852742656047326,
'Santa Fe, NM': -0.04794226779065536,
'Albany, NY': -0.0439948548029637,
'Raleigh, NC': -0.037781209393634,
'Bismarck, ND': -0.038778420835549575,
'Oklahoma City, OH': -0.04361796312801526,
'Salem, OR': -0.039538993616616745,
'Harrisburg, PA': -0.04214384792617321,
'Providence, RI': -0.04021977341993038,
'Columbia, SC': -0.04350047974814068,
'Pierre, SD': -0.040579818043541147,
'Nashville, TN': -0.0466268401751304,
'Austin, TX': -0.04279195547437081,
'Salt Lake City, UT': -0.042489828172970145,
'Montpelier, VT': -0.03809070731910734,
'Richmond, VA': -0.046323110278850894,
'Olympia, WA': -0.04694064637514814,
'Charleston, WV': -0.04706013948669316,
'Madison, WI': -0.03517318797591053,
'Cheyenne, WY': -0.04127756238860428
}
unweighted = {
'AL': -0.071806,
'AK': -0.082814,
'AZ': -0.083603,
'AR': -0.085412,
'CA': -0.064396,
'CO': -0.064324,
'CT': -0.057863,
'DE': -0.075264,
'FL': -0.057875,
'GA': -0.090823,
'HI': -0.080613,
'ID': -0.080004,
'IL': -0.062313,
'IN': -0.064927,
'IA': -0.080639,
'KS': -0.089863,
'KY': -0.068951,
'LA': -0.073592,
'ME': -0.059620,
'MD': -0.062009,
'MA': -0.053489,
'MI': -0.077970,
'MN': -0.068342,
'MS': -0.065541,
'MO': -0.069612,
'MT': -0.075233,
'NE': -0.061198,
'NV': -0.050031,
'NH': -0.068559,
'NJ': -0.075672,
'NM': -0.082795,
'NY': -0.069741,
'NC': -0.052452,
'ND': -0.066142,
'OH': -0.073240,
'OR': -0.053687,
'PA': -0.102050,
'RI': -0.058951,
'SC': -0.067755,
'SD': -0.074587,
'TN': -0.083953,
'TX': -0.068249,
'UT': -0.063682,
'VT': -0.060763,
'VA': -0.064583,
'WA': -0.083202,
'WV': -0.100591,
'WI': -0.075631,
'WY': -0.089318
}
google_weighted = {
'Montgomery,Alabama': -0.052620,
'Juneau,Juneau,Alaska': -0.051601,
'Phoenix,Arizona': -0.055961,
'Little Rock,Arkansas': -0.052230,
'Sacramento,California': -0.057171,
'Denver,Colorado': -0.057646,
'Hartford,Connecticut': -0.055862,
'Dover,Delaware': -0.057227,
'Tallahassee,Florida': -0.052155022000576014,
'Atlanta,Georgia': -0.05557964076252149,
'Honolulu,Hawaii': -0.05468414854474025,
'Boise,Idaho': -0.053364660804897476,
'Springfield,Illinois': -0.0525578252727048,
'Indianapolis,Indiana': -0.050825842137306974,
'Des Moines,New Mexico': -0.04908492404672618,
'Topeka,Indiana': -0.04693345961806908,
'Frankfort,Kansas': -0.05178464198623083,
'Baton Rouge,Louisiana': -0.05996784543835305,
'Augusta,Maine': -0.05777312624740943,
'Annapolis,Missouri': -0.06305408946402166,
'Boston,Massachusetts': -0.05599401243072362,
'Lansing,Michigan': -0.05363612221065318,
'Saint Paul,Minnesota': -0.05483828005945487,
'Jackson,Missouri': -0.05363580468681736,
'Jefferson City,Missouri': -0.057585290175204626,
'Helena,Montana': -0.05282379169593389,
'Lincoln,Montana': -0.05518281865916025,
'Carson City,Nevada': -0.057819502850532285,
'Concord,New Hampshire': -0.056172768550330716,
'Trenton,New Jersey': -0.050565,
'Santa Fe,New Mexico': -0.054469,
'Albany,New York': -0.056666,
'Raleigh,North Carolina': -0.057353,
'Columbus,Ohio': -0.053442,
'Oklahoma City,Oklahoma': -0.056267,
'Salem,Oregon': -0.053718,
'Harrisburg,Pennsylvania': -0.055401,
'Columbia,South Carolina': -0.054451,
'Pierre,South Dakota': -0.052940,
'Nashville,Tennessee': -0.055467,
'Austin,Texas': -0.057399,
'Montpelier,Vermont': -0.057246,
'Richmond County,Virginia': -0.055788,
'Olympia,Washington': -0.058078,
'Charleston,West Virginia': -0.056183,
'Cheyenne,Wyoming': -0.051547,
'Bismarck,North Dakota': -0.053327,
'Salt Lake City,Utah': -0.049546,
'Madison,Wisconsin': -0.052086
}
ddg_weighted = {
'California': -0.005289,
'New York': -0.004873,
'Florida': -0.004223,
'DC': -0.006426,
'Illinois': -0.003186
}
google_selected_weighted = {
'California': -0.057171,
'New York': -0.056666,
'Florida': -0.052155022000576014,
'Illinois': -0.0525578252727048
}
bing_selected_weighted = {
'California': -0.0447932962175611,
'New York': -0.0439948548029637,
'Florida': -0.04904944079404738,
'Illinois': -0.049079223899573546
}
ddg_lists = sorted(ddg_weighted.items())
ddg_x, ddg_y = zip(*ddg_lists)
g_select_lists = sorted(google_selected_weighted.items())
g_select_x, g_select_y = zip(*g_select_lists)
b_select_lists = sorted(bing_selected_weighted.items())
b_select_x, b_select_y = zip(*b_select_lists)
google_weighted_keys = list(google_weighted.keys())
counter = 1
for old_key in google_weighted_keys:
# print(old_key)
google_weighted[counter] = google_weighted[old_key]
google_weighted.pop(old_key, None)
counter += 1
lists = sorted(google_weighted.items())
google_x, google_y = zip(*lists)
four_am_weighted = {'Montgomery, AL': -0.04106160563522392,
'Juneau, AK': -0.04064849660680458,
'Phoenix, AZ': -0.04265279814978116,
'Little Rock, AR': -0.03930308863192053,
'Sacramento, CA': -0.040419826241333286,
'Denver, CO': -0.03904982615326652,
'Hartford, CT': -0.035606543448998246,
'Dover, DE': -0.04090555998222333,
'Tallahassee, FL': -0.050915518569258966,
'Atlanta, GA': -0.049727090576971514,
'Honolulu, HI': -0.04783970951436129,
'Boise, ID': -0.035020529725744896,
'Springfield, IL': -0.03746684172767187,
'Indianapolis, IN': -0.04756033710095835,
'Des Moines, IA': -0.034563683837355245,
'Topeka, KS': -0.04550256955292404,
'Frankfort, KY': -0.038019192506392144,
'Baton Rouge, LA': -0.053615543995478854,
'Augusta, ME': -0.03942428338139096,
'Annapolis, MD': -0.03439317053285649,
'Boston, MA': -0.031500983803294975,
'Lansing, MI': -0.04350563944003527,
'St. Paul, MN': -0.046485900079785,
'Jackson, MS': -0.04553522749514757,
'Jefferson City, MO': -0.046350631481364826,
'Helena, MT': -0.04677409953353769,
'Lincoln, NE': -0.044588639757308965,
'Carson City, NV': -0.04534741177225348,
'Concord, NH': -0.03931576946625701,
'Trenton, NJ': -0.050834531983403784,
'Santa Fe, NM': -0.040076032039976485,
'Albany, NY': -0.05177463857044876,
'Raleigh, NC': -0.042098743463228915,
'Bismarck, ND': -0.04537421959771563,
'Oklahoma City, OH': -0.05003428665152617,
'Salem, OR': -0.047811487222152234,
'Harrisburg, PA': -0.04427963014170003,
'Providence, RI': -0.047955175091332826,
'Columbia, SC': -0.04848707708409153,
'Pierre, SD': -0.03970123649428907,
'Nashville, TN': -0.041485485981635456,
'Austin, TX': -0.045247370591412144,
'Salt Lake City, UT': -0.03710654358663904,
'Montpelier, VT': -0.04443400320248127,
'Richmond, VA': -0.04212392927304667,
'Olympia, WA': -0.05046484392817484,
'Charleston, WV': -0.05307760559381502,
'Madison, WI': -0.044838417680892646,
'Cheyenne, WY': -0.03900113811728395}
google_ac_scores = [{'U': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US': [0, 0, 0.0, 0.0, 0,
0, 0.0, 0.0],
'US ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US E': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US El': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Ele': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Elec': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Elect': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Electi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Electio': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election D': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election De': [1, 0, 0.25, 0.0, 512, 0, 51.2, 0.0], 'US Election Dem': [1, 0, 0.5, 0.0, 512, 0, 85.33333333333333, 0.0], 'US Election Demo': [1, 0, 0.5, 0.0, 512, 0, 85.33333333333333, 0.0], 'US Election Democ': [2, 0, 1.3333333333333333, 0.0, 534, 0, 178.0, 0.0], 'US Election Democr': [2, 0, 1.3333333333333333, 0.0, 534, 0, 178.0, 0.0], 'US Election Democra': [2, 0, 1.3333333333333333, 0.0, 534, 0, 178.0, 0.0], 'US Election Democrat': [3, 0, 1.8333333333333333, 0.0, 556, 0, 185.33333333333334, 0.0], 'US Election Democrati': [4, 0, 2.083333333333333, 0.0, 578, 0, 144.5, 0.0], 'US Election Democratic': [4, 0, 2.083333333333333, 0.0, 578, 0, 144.5, 0.0]}, {'U': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US E': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US El': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Ele': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Elec': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Elect': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Electi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Electio': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election R': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Re': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Rep': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Repu': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Repub': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Republ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Republi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Republic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Republica': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Election Republican': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0]}, {'U': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Po': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Pol': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Poli': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Polit': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Politi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Politic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Politica': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Pa': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Par': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Part': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party D': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party De': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Dem': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Demo': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Democ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Democr': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political 
Party Democra': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Democrat': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Democrati': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Democratic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0]}, {'U': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Po': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Pol': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Poli': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Polit': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Politi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Politic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Politica': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Pa': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Par': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Part': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party R': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Re': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Rep': [1, 1, 1.0, 1.0, 22, 22, 4.4, 4.4], 'US Political Party Repu': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Repub': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Republ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Republi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Republic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'US Political Party Republica': [1, 1, 1.0, 1.0, 22, 22, 22.0, 22.0], 'US Political Party Republican': [1, 1, 1.0, 1.0, 22, 22, 22.0, 22.0]}, {'P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Pa': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Par': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Part': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party D': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party De': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party Dem': [2, 0, 0.225, 0.0, 1024, 0, 102.4, 0.0], 'Party Demo': [3, 0, 0.44285714285714284, 0.0, 1046, 0, 104.6, 0.0], 'Party Democ': [9, 0, 1.928968253968254, 0.0, 1178, 0, 117.8, 0.0], 'Party Democr': [9, 0, 1.928968253968254, 0.0, 1178, 0, 117.8, 0.0], 'Party Democra': [9, 0, 1.928968253968254, 0.0, 1178, 0, 117.8, 0.0], 'Party Democrat': [10, 0, 2.9289682539682538, 0.0, 1200, 0, 120.0, 0.0], 'Party Democrati': [10, 0, 2.9289682539682538, 0.0, 1200, 0, 120.0, 0.0], 'Party Democratic': [10, 0, 2.9289682539682538, 0.0, 1200, 0, 120.0, 0.0]}, {'P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Pa': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Par': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Part': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party R': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party Re': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party Rep': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party Repu': [0, 7, 0.0, 1.0956349206349207, 0, 154, 0.0, 15.4], 'Party Repub': [0, 9, 0.0, 1.928968253968254, 0, 633, 0.0, 63.3], 'Party Republ': [0, 9, 0.0, 1.928968253968254, 0, 633, 0.0, 63.3], 'Party Republi': [0, 9, 0.0, 1.928968253968254, 0, 633, 0.0, 63.3], 'Party Republic': [0, 9, 0.0, 1.928968253968254, 0, 633, 0.0, 63.3], 'Party Republica': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Party Republican': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0]}, {'C': [0, 0, 0.0, 0.0, 0, 0, 
0.0, 0.0], 'Ca': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Can': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Cand': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candid': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candida': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidat': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate D': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate De': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Dem': [3, 0, 0.43452380952380953, 0.0, 66, 0, 7.333333333333333, 0.0], 'Candidate Demo': [3, 0, 0.6166666666666667, 0.0, 66, 0, 11.0, 0.0], 'Candidate Democ': [3, 0, 1.8333333333333333, 0.0, 66, 0, 22.0, 0.0], 'Candidate Democr': [2, 0, 1.5, 0.0, 44, 0, 22.0, 0.0], 'Candidate Democra': [3, 0, 1.8333333333333333, 0.0, 66, 0, 22.0, 0.0], 'Candidate Democrat': [3, 0, 1.8333333333333333, 0.0, 66, 0, 22.0, 0.0], 'Candidate Democrati': [3, 0, 1.8333333333333333, 0.0, 57, 0, 19.0, 0.0], 'Candidate Democratic': [3, 0, 1.8333333333333333, 0.0, 66, 0, 22.0, 0.0]}, {'C': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Ca': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Can': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Cand': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candid': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candida': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidat': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate R': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Re': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Rep': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Repu': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Repub': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Republ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Republi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Republic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Republica': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Candidate Republican': [0, 2, 0.0, 1.5, 0, 44, 0.0, 22.0]}, {'P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Po': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Pol': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Poli': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Polit': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics D': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics De': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics Dem': [3, 0, 0.33611111111111114, 0.0, 66, 0, 6.6, 0.0], 'Politics Demo': [4, 0, 0.47896825396825393, 0.0, 88, 0, 8.8, 0.0], 'Politics Democ': [6, 0, 0.8456349206349206, 0.0, 132, 0, 13.2, 0.0], 'Politics Democr': [6, 0, 0.8456349206349206, 0.0, 132, 0, 13.2, 0.0], 'Politics Democra': [6, 0, 0.8456349206349206, 0.0, 132, 0, 13.2, 0.0], 'Politics Democrat': [10, 1, 2.9289682539682538, 1.0, 220, 22, 22.0, 2.2], 'Politics Democrati': [10, 1, 2.9289682539682538, 1.0, 211, 13, 21.1, 1.3], 'Politics Democratic': [10, 1, 2.9289682539682538, 1.0, 220, 22, 22.0, 2.2]}, {'P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Po': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Pol': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Poli': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Polit': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics R': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 
'Politics Re': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics Rep': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics Repu': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics Repub': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics Republ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics Republi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics Republic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politics Republica': [1, 1, 1.0, 1.0, 22, 22, 22.0, 22.0], 'Politics Republican': [1, 1, 1.0, 1.0, 22, 22, 22.0, 22.0]}, {'P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Po': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Pol': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Poli': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Polit': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politica': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political N': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political Ne': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political New': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News D': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News De': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Dem': [1, 0, 0.5, 0.0, 22, 0, 11.0, 0.0], 'Political News Demo': [1, 0, 0.5, 0.0, 22, 0, 11.0, 0.0], 'Political News Democ': [1, 0, 1.0, 0.0, 22, 0, 22.0, 0.0], 'Political News Democr': [1, 0, 1.0, 0.0, 22, 0, 22.0, 0.0], 'Political News Democra': [1, 0, 1.0, 0.0, 22, 0, 22.0, 0.0], 'Political News Democrat': [1, 0, 1.0, 0.0, 22, 0, 22.0, 0.0], 'Political News Democrati': [1, 0, 1.0, 0.0, 22, 0, 22.0, 0.0], 'Political News Democratic': [1, 0, 1.0, 0.0, 22, 0, 22.0, 0.0]}, {'P': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Po': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Pol': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Poli': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Polit': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Politica': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political N': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political Ne': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political New': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News R': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Re': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Rep': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Repu': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Repub': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Republ': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Republi': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Republic': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Republica': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0], 'Political News Republican': [0, 0, 0.0, 0.0, 0, 0, 0.0, 0.0]}]
ddg_ac_scores = [{'U': [0, 0, 0.0, 0.0], 'US': [0, 0, 0.0, 0.0], 'US ': [0, 0, 0.0, 0.0], 'US E':[0, 0, 0.0, 0.0], 'US El': [0, 0, 0.0, 0.0], 'US Ele': [0, 0, 0.0, 0.0], 'US Elec': [0, 0, 0.0, 0.0], 'US Elect': [0, 0, 0.0, 0.0], 'US Electi': [0, 0, 0.0, 0.0], 'US Electio': [0, 0, 0.0, 0.0], 'US Election': [0, 0, 0.0, 0.0], 'US Election ': [0, 0, 0.0, 0.0], 'US Election D': [0, 0, 0.0, 0.0], 'US Election De': [0, 0, 0.0, 0.0], 'US Election Dem': [1, 0, 0.5, 0.0], 'US Election Demo': [1, 0, 0.5, 0.0], 'US Election Democ': [1, 0, 1.0, 0.0], 'US Election Democr': [1, 0, 1.0, 0.0], 'US Election Democra': [1, 0, 1.0, 0.0], 'US Election Democrat': [1, 0, 1.0, 0.0], 'US Election Democrati': [1, 0, 1.0, 0.0], 'US Election Democratic': [3, 0, 1.5833333333333333, 0.0]},
{'U': [0, 0, 0.0, 0.0], 'US': [0, 0, 0.0, 0.0], 'US ': [0, 0, 0.0, 0.0], 'US E': [0, 0, 0.0, 0.0], 'US El': [0, 0, 0.0, 0.0], 'US Ele': [0, 0, 0.0, 0.0], 'US Elec': [0, 0, 0.0, 0.0], 'US Elect': [0, 0, 0.0, 0.0], 'US Electi': [0, 0, 0.0, 0.0], 'US Electio': [0, 0, 0.0, 0.0], 'US Election': [0, 0, 0.0, 0.0], 'US Election ': [0, 0, 0.0, 0.0], 'US Election R': [0, 0, 0.0, 0.0], 'US Election Re': [0, 0, 0.0, 0.0], 'US Election Rep': [0, 2, 0.0, 0.5833333333333333], 'US Election Repu': [0, 2, 0.0, 1.5], 'US Election Repub': [0, 3, 0.0, 1.75], 'US Election Republ': [0, 3, 0.0, 1.75], 'US Election Republi': [0, 3, 0.0, 1.75], 'US Election Republic': [0, 3, 0.0, 1.75], 'US Election Republica': [0, 3, 0.0, 1.8333333333333333], 'US Election Republican': [0, 5, 0.0, 2.283333333333333]},
{'U': [0, 0, 0.0, 0.0], 'US': [0, 0, 0.0, 0.0], 'US ': [0, 0, 0.0, 0.0], 'US P': [0, 0, 0.0, 0.0], 'US Po': [0, 0, 0.0, 0.0], 'US Pol': [0, 0, 0.0, 0.0], 'US Poli': [0, 0, 0.0, 0.0], 'US Polit': [0, 0, 0.0, 0.0], 'US Politi': [0, 0, 0.0, 0.0], 'US Politic': [0, 0, 0.0, 0.0], 'US Politica': [0, 0, 0.0, 0.0], 'US Political': [0, 0, 0.0, 0.0], 'US Political ': [0, 0, 0.0, 0.0], 'US Political P': [0, 0, 0.0, 0.0], 'US Political Pa': [0, 0, 0.0, 0.0], 'US Political Par': [0, 0, 0.0, 0.0], 'US Political Part': [0, 0, 0.0, 0.0], 'US Political Party': [0, 0, 0.0, 0.0], 'US Political Party ': [0, 0, 0.0, 0.0], 'US Political Party D': [0, 0, 0.0, 0.0], 'US Political Party De': [0, 0, 0.0, 0.0], 'US Political Party Dem': [1, 0, 0.2, 0.0], 'US Political Party Demo': [1, 0, 0.25, 0.0], 'US Political Party Democ': [1, 0, 0.5, 0.0], 'US Political Party Democr': [1, 0, 0.5, 0.0], 'US Political Party Democra': [1, 0, 0.5, 0.0], 'US Political Party Democrat': [1, 0, 1.0, 0.0], 'US Political Party Democrati': [1, 0, 1.0, 0.0], 'US Political Party Democratic': [1, 0, 1.0, 0.0]},
{'U': [0, 0, 0.0, 0.0], 'US': [0, 0, 0.0, 0.0], 'US ': [0, 0, 0.0, 0.0], 'US P': [0, 0, 0.0, 0.0], 'US Po': [0, 0, 0.0, 0.0], 'US Pol': [0, 0, 0.0, 0.0], 'US Poli': [0, 0, 0.0, 0.0], 'US Polit': [0, 0, 0.0, 0.0], 'US Politi': [0, 0, 0.0, 0.0], 'US Politic': [0, 0, 0.0, 0.0], 'US Politica': [0, 0, 0.0, 0.0], 'US Political': [0, 0, 0.0, 0.0], 'US Political ': [0, 0, 0.0, 0.0], 'US Political P': [0, 0, 0.0, 0.0], 'US Political Pa': [0, 0, 0.0, 0.0], 'US Political Par': [0, 0, 0.0, 0.0], 'US Political Part': [0, 0, 0.0, 0.0], 'US Political Party': [0, 0, 0.0, 0.0], 'US Political Party ': [0, 0, 0.0, 0.0], 'US Political Party R': [0, 0, 0.0, 0.0], 'US Political Party Re': [0, 1, 0.0, 0.14285714285714285], 'US Political Party Rep': [0, 1, 0.0, 0.16666666666666666], 'US Political Party Repu': [0, 1, 0.0, 0.5], 'US Political Party Repub': [0, 1, 0.0, 0.2], 'US Political Party Republ': [0, 1, 0.0, 1.0], 'US Political Party Republi': [0, 1, 0.0, 1.0], 'US Political Party Republic': [0, 1, 0.0, 1.0], 'US Political Party Republica': [0, 1, 0.0, 1.0], 'US Political Party Republican': [0, 0, 0.0, 0.0]},
{'P': [0, 0, 0.0, 0.0], 'Pa': [0, 0, 0.0, 0.0], 'Par': [0, 0, 0.0, 0.0], 'Part': [0, 0, 0.0, 0.0], 'Party': [0, 0, 0.0, 0.0], 'Party ': [0, 0, 0.0, 0.0], 'Party D': [0, 0, 0.0, 0.0], 'Party De': [0, 0, 0.0, 0.0], 'Party Dem': [2, 0, 0.45, 0.0], 'Party Demo': [2, 0, 0.45, 0.0], 'Party Democ': [5, 0, 1.2678571428571428, 0.0], 'Party Democr': [5, 0, 1.2678571428571428, 0.0], 'Party Democra': [5, 0, 1.2678571428571428, 0.0], 'Party Democrat': [7, 0, 2.3845238095238095, 0.0], 'Party Democrati': [2, 0, 1.5, 0.0], 'Party Democratic': [2, 0, 1.5, 0.0]},
{'P': [0, 0, 0.0, 0.0], 'Pa': [0, 0, 0.0, 0.0], 'Par': [0, 0, 0.0, 0.0], 'Part': [0, 0, 0.0, 0.0], 'Party': [0, 0, 0.0, 0.0], 'Party ': [0, 0, 0.0, 0.0], 'Party R': [0, 0, 0.0, 0.0], 'Party Re': [0, 0, 0.0, 0.0], 'Party Rep': [0, 1, 0.0, 0.2], 'Party Repu': [1, 8, 0.125, 2.7178571428571425], 'Party Repub': [1, 8, 0.125, 2.7178571428571425], 'Party Republ': [1, 8, 0.125, 2.7178571428571425], 'Party Republi': [1, 8, 0.125, 2.7178571428571425], 'Party Republic': [0, 7, 0.0, 2.217857142857143], 'Party Republica': [1, 8, 0.125, 2.7178571428571425], 'Party Republican': [1, 8, 0.125, 2.7178571428571425]},
{'C': [0, 0, 0.0, 0.0], 'Ca': [0, 0, 0.0, 0.0], 'Can': [0, 0, 0.0, 0.0], 'Cand': [0, 0, 0.0, 0.0], 'Candi': [0, 0, 0.0, 0.0], 'Candid': [0, 0, 0.0, 0.0], 'Candida': [0, 0, 0.0, 0.0], 'Candidat': [0, 0, 0.0, 0.0], 'Candidate': [0, 0, 0.0, 0.0], 'Candidate ': [0, 0, 0.0, 0.0], 'Candidate D': [0, 0, 0.0, 0.0], 'Candidate De': [0, 0, 0.0, 0.0], 'Candidate Dem': [2, 0, 0.29166666666666663, 0.0], 'Candidate Demo': [3, 0, 0.46785714285714286, 0.0], 'Candidate Democ': [3, 0, 0.726190476190476, 0.0], 'Candidate Democr': [3, 0, 0.726190476190476, 0.0], 'Candidate Democra': [3, 0, 0.726190476190476, 0.0], 'Candidate Democrat': [3, 0, 0.7499999999999999, 0.0], 'Candidate Democrati': [7, 0, 2.4678571428571425, 0.0], 'Candidate Democratic': [7, 0, 2.4678571428571425, 0.0]},
{'C': [0, 0, 0.0, 0.0], 'Ca': [0, 0, 0.0, 0.0], 'Can': [0, 0, 0.0, 0.0], 'Cand': [0, 0, 0.0, 0.0], 'Candi': [0, 0, 0.0, 0.0], 'Candid': [0, 0, 0.0, 0.0], 'Candida': [0, 0, 0.0, 0.0], 'Candidat': [0, 0, 0.0, 0.0], 'Candidate': [0, 0, 0.0, 0.0], 'Candidate ': [0, 0, 0.0, 0.0], 'Candidate R': [0, 0, 0.0, 0.0], 'Candidate Re': [0, 0, 0.0, 0.0], 'Candidate Rep': [0, 1, 0.0, 0.5], 'Candidate Repu': [0, 8, 0.0, 2.7178571428571425], 'Candidate Repub': [0, 8, 0.0, 2.7178571428571425], 'Candidate Republ': [0, 8, 0.0, 2.7178571428571425], 'Candidate Republi': [0, 8, 0.0, 2.7178571428571425], 'Candidate Republic': [0, 8, 0.0, 2.7178571428571425], 'Candidate Republica': [0, 8, 0.0, 2.7178571428571425], 'Candidate Republican': [0, 8, 0.0, 2.7178571428571425]},
{'P': [0, 0, 0.0, 0.0], 'Po': [0, 0, 0.0, 0.0], 'Pol': [0, 0, 0.0, 0.0], 'Poli': [0, 0, 0.0, 0.0], 'Polit': [0, 0, 0.0, 0.0], 'Politi': [0, 0, 0.0, 0.0], 'Politic': [0, 0, 0.0, 0.0], 'Politics': [0, 0, 0.0, 0.0], 'Politics ': [0, 0, 0.0, 0.0], 'Politics D': [0, 0, 0.0, 0.0], 'Politics De': [0, 0, 0.0, 0.0], 'Politics Dem': [1, 2, 0.14285714285714285, 0.34285714285714286], 'Politics Demo': [1, 2, 0.14285714285714285, 0.34285714285714286], 'Politics Democ': [1, 2, 0.16666666666666666, 0.3666666666666667], 'Politics Democr': [1, 2, 0.16666666666666666, 0.3666666666666667], 'Politics Democra': [1, 2, 0.16666666666666666, 0.3666666666666667], 'Politics Democrat': [6, 2, 1.2178571428571427, 0.8333333333333333], 'Politics Democrati': [3, 2, 1.5833333333333333, 1.1666666666666667], 'Politics Democratic': [3, 2, 1.75, 1.2]},
{'P': [0, 0, 0.0, 0.0], 'Po': [0, 0, 0.0, 0.0], 'Pol': [0, 0, 0.0, 0.0], 'Poli': [0, 0, 0.0, 0.0], 'Polit': [0, 0, 0.0, 0.0], 'Politi': [0, 0, 0.0, 0.0], 'Politic': [0, 0, 0.0, 0.0], 'Politics': [0, 0, 0.0, 0.0], 'Politics ': [0, 0, 0.0, 0.0], 'Politics R': [0, 0, 0.0, 0.0], 'Politics Re': [0, 0, 0.0, 0.0], 'Politics Rep': [0, 2, 0.0, 0.29166666666666663], 'Politics Repu': [0, 4, 0.0, 2.083333333333333], 'Politics Repub': [0, 4, 0.0, 2.083333333333333], 'Politics Republ': [0, 4, 0.0, 2.083333333333333], 'Politics Republi': [0, 4, 0.0, 2.083333333333333], 'Politics Republic': [0, 4, 0.0, 2.083333333333333], 'Politics Republica': [0, 5, 0.0, 2.283333333333333], 'Politics Republican': [0, 8, 0.0, 2.7178571428571425]},
{'P': [0, 0, 0.0, 0.0], 'Po': [0, 0, 0.0, 0.0], 'Pol': [0, 0, 0.0, 0.0], 'Poli': [0, 0, 0.0, 0.0], 'Polit': [0, 0, 0.0, 0.0], 'Politi': [0, 0, 0.0, 0.0], 'Politic': [0, 0, 0.0, 0.0], 'Politica': [0, 0, 0.0, 0.0], 'Political': [0, 0, 0.0, 0.0], 'Political ': [0, 0, 0.0, 0.0], 'Political N': [0, 0, 0.0, 0.0], 'Political Ne': [0, 0, 0.0, 0.0], 'Political New': [0, 0, 0.0, 0.0], 'Political News': [0, 0, 0.0, 0.0], 'Political News ': [0, 0, 0.0, 0.0], 'Political News D': [0, 0, 0.0, 0.0], 'Political News De': [0, 0, 0.0, 0.0], 'Political News Dem': [0, 0, 0.0, 0.0], 'Political News Demo': [0, 0, 0.0, 0.0], 'Political News Democ': [0, 0, 0.0, 0.0], 'Political News Democr': [0, 0, 0.0, 0.0], 'Political News Democra': [0, 0, 0.0, 0.0], 'Political News Democrat': [0, 0, 0.0, 0.0], 'Political News Democrati': [0, 0, 0.0, 0.0], 'Political News Democratic': [0, 0, 0.0, 0.0]},
{'P': [0, 0, 0.0, 0.0], 'Po': [0, 0, 0.0, 0.0], 'Pol': [0, 0, 0.0, 0.0], 'Poli': [0, 0, 0.0, 0.0], 'Polit': [0, 0, 0.0, 0.0], 'Politi': [0, 0, 0.0, 0.0], 'Politic': [0, 0, 0.0, 0.0], 'Politica': [0, 0, 0.0, 0.0], 'Political': [0, 0, 0.0, 0.0], 'Political ': [0, 0, 0.0, 0.0], 'Political N': [0, 0, 0.0, 0.0], 'Political Ne': [0, 0, 0.0, 0.0], 'Political New': [0, 0, 0.0, 0.0], 'Political News': [0, 0, 0.0, 0.0], 'Political News ': [0, 0, 0.0, 0.0], 'Political News R': [0, 0, 0.0, 0.0], 'Political News Re': [0, 1, 0.0, 0.125], 'Political News Rep': [0, 1, 0.0, 0.25], 'Political News Repu': [0, 1, 0.0, 1.0], 'Political News Repub': [0, 1, 0.0, 1.0], 'Political News Republ': [0, 1, 0.0, 1.0], 'Political News Republi': [0, 1, 0.0, 1.0], 'Political News Republic': [0, 1, 0.0, 1.0], 'Political News Republica': [0, 1, 0.0, 1.0], 'Political News Republican': [0, 1, 0.0, 1.0]}]
# unweighted_keys = list(unweighted.keys())
# counter = 1
# for old_key in unweighted_keys:
# print(old_key)
# unweighted[counter] = unweighted[old_key]
# unweighted.pop(old_key, None)
# counter += 1
#
# lists = sorted(unweighted.items())
# x, y = zip(*lists) # unpack a list of pairs into two tuples
#
# weighted_keys = list(weighted.keys())
# counter = 1
# for old_key in weighted_keys:
# print(old_key)
# weighted[counter] = weighted[old_key]
# weighted.pop(old_key, None)
# counter += 1
#
# for key in weighted_keys:
# if key[-2:] not in unweighted_keys:
# print("*found: %s" % key)
#
# lists1 = sorted(weighted.items())
# x1, y1 = zip(*lists1)
#
# weighted_keys = list(four_am_weighted.keys())
# counter = 1
# for old_key in weighted_keys:
# print(old_key)
# four_am_weighted[counter] = four_am_weighted[old_key]
# four_am_weighted.pop(old_key, None)
# counter += 1
#
# for key in weighted_keys:
# if key[-2:] not in unweighted_keys:
# print("*found: %s" % key)
#
# lists2 = sorted(four_am_weighted.items())
# x2, y2 = zip(*lists2)
#
# plt.xticks(rotation=90)
# plt.ylim([-0.07, 0.01])
# plt.axhline(y=0.0, color='grey', linestyle=':')
# plt.xlabel("States")
# plt.ylabel("Search Engine Bias")
#
#
# def newline(p1, p2, color='skyblue'):
# ax = plt.gca()
# if p1[1] < p2[1]:
# color = 'lightsalmon'
# l = mlines.Line2D([p1[0], p2[0]], [p1[1], p2[1]], color=color)
# ax.add_line(l)
# return l
# fig, ax = plt.subplots(1,1,figsize=(14,14))
# ax.scatter(y=y1, x=x1, s=50, color='blue', alpha=0.7, label='8pm')
# ax.scatter(y=y2, x=x1, s=50, color='red', alpha=0.7, label='4am')
# ax.set_ylim([-0.06, -0.02])
# ax.set_xlabel('States')
# ax.set_ylabel('Bing Search Bias')
# ax.legend()
# # Line Segments
# for i, p1, p2 in zip(x1, y1, y2):
# newline([i, p1], [i, p2])
fig, axs = plt.subplots(2, 3)
# fig.suptitle("Google's Auto complete bias")
auto_complete_words = ['US Election *',
'US Political Party *',
'Party *',
'Candidate *',
'Politics *',
'Political News *']
counter = 0
unweighted_scores_dem = []
for phrase in ddg_ac_scores:
    # phrases alternate: an even counter holds the Democratic variant of a query,
    # the following odd counter holds the matching Republican variant
    if counter % 2 == 0:
        unweighted_scores_dem = [value[0] for key, value in phrase.items()]
        counter = counter + 1
        continue
    unweighted_scores_rep = [value[1] for key, value in phrase.items()]
    dem_x_range = np.arange(1, len(unweighted_scores_dem)+1)
    rep_x_range = np.arange(1, len(unweighted_scores_rep)+1)
    phrase_idx = math.floor(counter/2)
    x_idx = math.floor(phrase_idx/3)
    y_idx = phrase_idx % 3
# if counter == 0:
# axs = axs1
# elif counter == 1:
# axs = axs2
# elif counter == 2:
# axs = axs3
# elif counter == 3:
# axs = axs4
# elif counter == 4:
# axs = axs5
# elif counter == 5:
# axs = axs6
# axs[x_idx, y_idx].yaxis.set_major_locator(MaxNLocator(integer=True))
axs[x_idx, y_idx].set_ylim([0, 11])
axs[x_idx, y_idx].plot(dem_x_range, unweighted_scores_dem, color='dodgerblue',
label='Democratic Suggestions')
axs[x_idx, y_idx].plot(rep_x_range, unweighted_scores_rep, color='indianred',
label='Republican Suggestions')
axs[x_idx, y_idx].set_title(auto_complete_words[math.floor(counter/2)])
# axs[x_idx, y_idx].set_ylabel('Suggestions')
# axs[x_idx, y_idx].xaxis.set_ticks(np.arange(min(dem_x_range), max(dem_x_range)
# + 1, 5))
counter = counter+1
# plt.ylim([-0.06, 0.01])
# plt.plot(ddg_x, ddg_y, color='red', marker='^', markerfacecolor='none', linestyle = '',
# label='DuckDuckGo')
# plt.plot(b_select_x, b_select_y, color='blue', marker='^', markerfacecolor='none',
# linestyle='',
# label='Bing')
# plt.plot(g_select_x, g_select_y, color='orange', marker='^', markerfacecolor='none',
# linestyle='',
# label='Google')
# plt.legend(loc="upper left")
# zip joins x and y coordinates in pairs
plt.show()
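# Hedged helper sketch (assumption: each prefix in ddg_ac_scores maps to
# [dem_count, rep_count, dem_weighted, rep_weighted], matching how value[0]
# and value[1] are read in the plotting loop above). It summarizes, per query
# template, how many more Democratic than Republican suggestions were counted.
def summarize_unweighted_gap(scores):
    gaps = []
    for dem_phrase, rep_phrase in zip(scores[0::2], scores[1::2]):
        dem_total = sum(value[0] for value in dem_phrase.values())
        rep_total = sum(value[1] for value in rep_phrase.values())
        gaps.append(dem_total - rep_total)
    return gaps
# Example: print(list(zip(auto_complete_words, summarize_unweighted_gap(ddg_ac_scores))))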
| 92.508728
| 13,732
| 0.537848
|
99f154c0847815bd2ff3139c0620f04e7f6bb690
| 1,490
|
py
|
Python
|
geometric_algebra_attention/keras/Multivector2MultivectorAttention.py
|
klarh/geometric_algebra_attention
|
327f5d964b5bf72b6bf54b503c23ad8a0d7dc438
|
[
"MIT"
] | 5
|
2021-10-14T22:24:00.000Z
|
2022-03-24T20:11:59.000Z
|
geometric_algebra_attention/keras/Multivector2MultivectorAttention.py
|
klarh/geometric_algebra_attention
|
327f5d964b5bf72b6bf54b503c23ad8a0d7dc438
|
[
"MIT"
] | 1
|
2021-12-03T18:51:19.000Z
|
2021-12-03T18:51:19.000Z
|
geometric_algebra_attention/keras/Multivector2MultivectorAttention.py
|
klarh/geometric_algebra_attention
|
327f5d964b5bf72b6bf54b503c23ad8a0d7dc438
|
[
"MIT"
] | 2
|
2021-10-14T22:26:07.000Z
|
2022-03-24T20:23:05.000Z
|
from tensorflow import keras
from .. import base
from .MultivectorAttention import MultivectorAttention
class Multivector2MultivectorAttention(base.Multivector2MultivectorAttention, MultivectorAttention):
__doc__ = base.Multivector2MultivectorAttention.__doc__
def __init__(self, score_net, value_net, scale_net, reduce=True,
merge_fun='mean', join_fun='mean', rank=2,
invariant_mode='single', covariant_mode='partial',
include_normalized_products=False, **kwargs):
base.Multivector2MultivectorAttention.__init__(self, scale_net=scale_net)
MultivectorAttention.__init__(
self, score_net=score_net, value_net=value_net,
reduce=reduce, merge_fun=merge_fun, join_fun=join_fun, rank=rank,
invariant_mode=invariant_mode, covariant_mode=covariant_mode,
include_normalized_products=include_normalized_products,
**kwargs)
@classmethod
def from_config(cls, config):
new_config = dict(config)
for key in ('scale_net',):
new_config[key] = keras.models.Sequential.from_config(new_config[key])
return super(Multivector2MultivectorAttention, cls).from_config(new_config)
def get_config(self):
result = super().get_config()
result['scale_net'] = self.scale_net.get_config()
return result
keras.utils.get_custom_objects()['Multivector2MultivectorAttention'] = Multivector2MultivectorAttention
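# --- Hedged usage sketch (illustrative only; the sub-network sizes below are
# --- assumptions, not values prescribed by this package) ---------------------
def _example_build_attention():
    """Construct the attention layer from three small stand-in keras models."""
    score_net = keras.models.Sequential([keras.layers.Dense(1)])
    value_net = keras.models.Sequential([keras.layers.Dense(8)])
    scale_net = keras.models.Sequential([keras.layers.Dense(1)])
    return Multivector2MultivectorAttention(
        score_net, value_net, scale_net,
        reduce=True, merge_fun='mean', join_fun='mean', rank=2)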
| 43.823529
| 103
| 0.720134
|
92625cccf2e95ba23647157534b187e6644c0383
| 293
|
py
|
Python
|
orange3/doc/data-mining-library/source/reference/code/discretization-table.py
|
rgschmitz1/BioDepot-workflow-builder
|
f74d904eeaf91ec52ec9b703d9fb38e9064e5a66
|
[
"MIT"
] | 54
|
2017-01-08T17:21:49.000Z
|
2021-11-02T08:46:07.000Z
|
orange3/doc/data-mining-library/source/reference/code/discretization-table.py
|
Synthia-3/BioDepot-workflow-builder
|
4ee93abe2d79465755e82a145af3b6a6e1e79fd4
|
[
"MIT"
] | 22
|
2017-03-28T06:03:14.000Z
|
2021-07-28T05:43:55.000Z
|
orange3/doc/data-mining-library/source/reference/code/discretization-table.py
|
Synthia-3/BioDepot-workflow-builder
|
4ee93abe2d79465755e82a145af3b6a6e1e79fd4
|
[
"MIT"
] | 21
|
2017-01-26T21:12:09.000Z
|
2022-01-31T21:34:59.000Z
|
import Orange
iris = Orange.data.Table("iris.tab")
disc = Orange.preprocess.Discretize()
disc.method = Orange.preprocess.discretize.EqualFreq(n=3)
d_iris = disc(iris)
print("Original dataset:")
for e in iris[:3]:
print(e)
print("Discretized dataset:")
for e in d_iris[:3]:
print(e)
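# Hedged variant (assumes the same Orange API as above): equal-width binning
# instead of equal-frequency, then inspect the names of the derived variables.
disc_width = Orange.preprocess.Discretize()
disc_width.method = Orange.preprocess.discretize.EqualWidth(n=3)
w_iris = disc_width(iris)
print("Equal-width discretized attributes:")
print([attr.name for attr in w_iris.domain.attributes])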
| 19.533333
| 57
| 0.709898
|
4daac0ed168965cc289495995a10c8f39089eb72
| 567
|
py
|
Python
|
tests/test_entity_list.py
|
ebelter/mgi
|
37ea9cbddb3d64d3f0b9db4357f76f80a16d52e5
|
[
"MIT"
] | null | null | null |
tests/test_entity_list.py
|
ebelter/mgi
|
37ea9cbddb3d64d3f0b9db4357f76f80a16d52e5
|
[
"MIT"
] | null | null | null |
tests/test_entity_list.py
|
ebelter/mgi
|
37ea9cbddb3d64d3f0b9db4357f76f80a16d52e5
|
[
"MIT"
] | null | null | null |
import os, unittest
from tests.test_base_classes import TestBaseWithDb
from mgi.entity.list import list_entities
class EntityListTest(TestBaseWithDb):
def test_list_entities(self):
rows = list_entities({})
self.assertEqual(rows, [["H_G002", "hic"], ["GRCh38", ""]])
rows = list_entities({"kind": "sample"})
self.assertEqual(rows, [["H_G002", "hic"]])
rows = list_entities({"sets": "hic"})
self.assertEqual(rows, [["H_G002", "hic"]])
# -- EntityListTest
if __name__ == '__main__':
unittest.main(verbosity=2)
| 29.842105
| 67
| 0.64903
|
24f447fa98cb67cc6941de2a0ed0f1ac3b72292f
| 166
|
py
|
Python
|
hackerrank/python/math/polar-coordinates.py
|
ParinVachhani/coding-practice
|
2268215b1e1f26bfe09d0dfb4f94366594a6ef37
|
[
"MIT"
] | 1
|
2018-05-06T14:03:02.000Z
|
2018-05-06T14:03:02.000Z
|
hackerrank/python/math/polar-coordinates.py
|
ParinVachhani/coding-practice
|
2268215b1e1f26bfe09d0dfb4f94366594a6ef37
|
[
"MIT"
] | null | null | null |
hackerrank/python/math/polar-coordinates.py
|
ParinVachhani/coding-practice
|
2268215b1e1f26bfe09d0dfb4f94366594a6ef37
|
[
"MIT"
] | null | null | null |
import cmath
if __name__ == '__main__':
a = complex(input())
print(abs(a))
print(cmath.phase(a))
# import cmath
# print(*cmath.polar(complex(input())), sep='\n')
| 18.444444
| 49
| 0.656627
|
177f0158644b349ae4264a8a9887bc406475e8d7
| 30,061
|
py
|
Python
|
TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/conv.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | null | null | null |
TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/conv.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | null | null | null |
TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/conv.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | null | null | null |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" utilities for conv op """
from typing import Tuple, List, Union
import numpy as np
import tensorflow as tf
from aimet_common.utils import AimetLogger
from aimet_tensorflow.utils.common import get_padding, create_input_feed_dict, create_rand_tensors_given_shapes, \
get_valid_ops
from aimet_tensorflow import graph_editor
from aimet_tensorflow.utils.graph_saver import save_and_load_graph
from aimet_tensorflow.utils import constants
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
class WeightTensorUtils:
""" class with generic apis related to TF weight tensor of conv op """
@staticmethod
def get_tensor_index_in_given_op(input_op: tf.Operation) -> int:
"""
Returns the index to weight tensor in the op specified
:param input_op: tf operation type
:return: index of weight tensor in the inputs of the given op
"""
if input_op.type not in constants.OP_WEIGHT_INDICES:
raise ValueError('Op type: '+input_op.type+' does not contain weights!')
return constants.OP_WEIGHT_INDICES[input_op.type]
@staticmethod
def get_tensor_shape(input_op: tf.Operation) -> List[int]:
"""
Returns the shape of weight tensor in the op specified
:param input_op: tf operation type
:return: shape as list
"""
weight_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(input_op)
return input_op.inputs[weight_tensor_index].shape
@staticmethod
def get_read_op(input_op: tf.Operation) -> tf.Operation:
"""
Returns the read op associated with the weight tensor in given op
:param input_op: operation for which the read op on weight tensor is to be obtained
:return: read op associated with weight tensor
"""
wt_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(input_op)
return input_op.inputs[wt_tensor_index].op
@staticmethod
def get_wt_tensor(op: tf.Operation) -> tf.Tensor:
"""
        Get the weight tensor in the given op.
        This is used by the API for updating the weight tensor.
        :param op: tf operation to extract weight tensor from
        :return: weight tensor as tf.Tensor type
"""
wt_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(op)
assert len(op.inputs) == 2
wt_var_read_op = op.inputs[wt_tensor_index].op
wt_tensor = wt_var_read_op.inputs[constants.OP_VAR_WEIGHT_INDEX]
return wt_tensor
@staticmethod
def get_wt_as_read_var_tensor(op: tf.Operation) -> tf.Tensor:
"""
get weight kernel in the op as a readVariableOp tensor
we need to evaluate this to get weights (not get_wt_tensor)
:param op: tf operation to extract weight tensor from
:return : weight tensor as ReadVariableOp tensor
"""
wt_tensor_index = WeightTensorUtils.get_tensor_index_in_given_op(op)
assert len(op.inputs) == 2
get_wt_as_read_var_tensor = op.inputs[wt_tensor_index]
return get_wt_as_read_var_tensor
@staticmethod
def get_tensor_as_numpy_data(sess: tf.compat.v1.Session, op: tf.Operation) -> np.array:
"""
return weight kernel in the op as numpy data
:param sess: TensorFlow session
:param op: tf operation to extract weight tensor from.
:return : weight tensor as numpy array type, if found in the given op
"""
wt_tensor = WeightTensorUtils.get_wt_as_read_var_tensor(op)
numpy_data = sess.run(wt_tensor)
return numpy_data
@staticmethod
def update_tensor_for_op(sess: tf.compat.v1.Session, op: tf.Operation, tensor_as_numpy_array):
"""
updated existing weight tensor variable in given op with new value.
:param sess: active tf.compat.v1.Session
:param op: op for which the weight tensor is to be updated
:param tensor_as_numpy_array: new weight tensor as numpy array
:return: None
"""
# validate the shapes are same
assert WeightTensorUtils.get_tensor_shape(op) == tensor_as_numpy_array.shape
# update the weight tensor
with sess.graph.as_default():
wt_tensor = WeightTensorUtils.get_wt_tensor(op)
assert wt_tensor is not None, ('Error, no weight tensor found for this op', op.name)
# use tensor name to lookup var type associated with it
wt_as_var = [var for var in tf.compat.v1.global_variables() if var.name == wt_tensor.name][0]
wt_as_var.load(tensor_as_numpy_array, sess)
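# --- Hedged usage sketch (illustrative, not part of this module's API): scale
# --- the kernel of a conv/linear op in place with the helpers above. `sess`
# --- and `conv_op` are assumed to come from an already-built tf.compat.v1 graph.
def _example_scale_conv_weights(sess, conv_op, factor=0.5):
    """Read the op's kernel as numpy data, scale it, and write it back."""
    kernel = WeightTensorUtils.get_tensor_as_numpy_data(sess, conv_op)
    WeightTensorUtils.update_tensor_for_op(sess, conv_op, kernel * factor)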
class BiasUtils:
""" util for operating on TF bias tensor"""
@staticmethod
def _get_bias_shape_from_weights(conv_op: tf.Operation) -> int:
"""
helper function to get bias shape from weight shape of a given op.
:param conv_op: conv op as tf.Operation
:return: bias shape for given op
"""
assert conv_op.type in ['Conv2D', 'DepthwiseConv2dNative', 'MatMul']
w_shape = WeightTensorUtils.get_tensor_shape(conv_op)
b_index = constants.OP_WEIGHT_SHAPE_INDEX_FOR_BIAS[conv_op.type]
return w_shape[b_index]
    @staticmethod
def insert_bias_add_op(sess: tf.compat.v1.Session, conv_op_out_tensor: tf.Tensor,
new_bias_tensor: tf.Variable, bias_name="bias_value") -> None:
"""
Insert bias-add op to given conv op.
:param sess: model as tf.compat.v1.Session
:param conv_op_out_tensor: output of conv op that should feed into the new bias op as tf.Tensor
:param new_bias_tensor: bias tensor to be added as tf.Variable
:param bias_name: name string for the bias op
        :return: None
        Note: the higher-level API needs to perform a save and load to get an updated session after using this API
"""
assert conv_op_out_tensor is not None, 'Error, insert_bias_add_op() : conv op output tensor must be provided'
with sess.graph.as_default():
if conv_op_out_tensor.consumers():
consumer_list = []
for consumer in conv_op_out_tensor.consumers():
consumer_list.append(consumer)
# create new Bias add op
bias_add_op = tf.nn.bias_add(value=conv_op_out_tensor, bias=new_bias_tensor, name=bias_name)
# use reroute to insert bias-add and swap current outputs of conv with bias-add op
graph_editor.reroute_ts(bias_add_op, conv_op_out_tensor, can_modify=consumer_list)
# initialize tensor once it's added
sess.run(tf.compat.v1.variables_initializer([new_bias_tensor]))
@staticmethod
def initialize_model_with_bias(sess: tf.compat.v1.Session, input_op_names: List[str], output_op_names: List[str]) \
-> tf.compat.v1.Session:
"""
Initializes given model with bias.
Adds zero bias to conv/linear layers without bias param, in given model.
        :param sess: model to be updated as tf.compat.v1.Session
        :param input_op_names: list of starting op names of the model
        :param output_op_names: list of output op names of the model
        :return: updated session as tf.compat.v1.Session
"""
assert sess is not None
with sess.graph.as_default():
ops = get_valid_ops(sess.graph, input_op_names, output_op_names)
for op in ops:
# skip gradient ops
if not op.name.startswith('gradients/') and \
op.type in ['Conv2D', 'DepthwiseConv2dNative', 'MatMul']:
# add bias if not present
if BiasUtils.is_bias_none(op):
# add bias param
bias_shape = BiasUtils._get_bias_shape_from_weights(op)
zero_bias = tf.Variable(initial_value=np.zeros(bias_shape), dtype=tf.float32)
BiasUtils._create_bias_add_op_and_insert(sess, op, zero_bias)
new_sess = save_and_load_graph('./temp', sess)
sess.close()
return new_sess
@staticmethod
def _create_bias_add_op_and_insert(sess: tf.compat.v1.Session, conv_op: tf.Operation, new_bias_var: tf.Variable,
bias_name="bias_value") -> None:
"""
creates and adds a bias_add op to conv op
:param sess: active tf.compat.v1.Session
:param conv_op: Convolution op
:param new_bias_var: bias variable
:param bias_name: an optional string for bias name
:return: None
"""
assert conv_op.type in ['Conv2D', 'DepthwiseConv2dNative', 'MatMul']
with sess.graph.as_default():
if conv_op.outputs:
bias_index_in_op = BiasUtils.get_bias_index_in_given_op(conv_op)
conv_op_out_tensor = conv_op.outputs[bias_index_in_op]
sess.run(tf.compat.v1.variables_initializer([new_bias_var]))
BiasUtils.insert_bias_add_op(sess, conv_op_out_tensor, new_bias_var,
bias_name)
@staticmethod
def get_bias_index_in_given_op(input_op: tf.Operation) -> int:
"""
Returns the index to bias tensor in the op specified
:param input_op: tf operation type
:return: index of bias tensor in the inputs of the given op
"""
if input_op.type not in constants.OP_BIAS_INDICES:
raise ValueError('Op type: ' + input_op.type + ' does not contain bias!')
return constants.OP_BIAS_INDICES[input_op.type]
@staticmethod
def is_bias_none(input_op: tf.Operation):
"""
checks for the presence of bias in a given op
:param input_op: tf operation type
:return: True if given op has NO bias, false otherwise
"""
is_bias_none = True
if not input_op.outputs:
is_bias_none = False
else:
# Bias is consumers of output_0 of the conv op, look for it
# output 0 is the bias tensor
bias_index = BiasUtils.get_bias_index_in_given_op(input_op)
for consumer in input_op.outputs[bias_index].consumers():
# Ignore Reshape as it can be placed between MatMul and BiasAdd on Dense layer of Transformer
if consumer.type in ['Reshape'] and len(consumer.outputs[0].consumers()) == 1:
consumer = consumer.outputs[0].consumers()[0]
# check the input types of the add or bias_add
if consumer.type == 'BiasAdd':
# check num tensors and op types coming into this bias add or add
assert len(consumer.inputs) == 2
# check if one of the inputs is ReadVariableOp type or Identity type
# when we add BiasAdd op to a conv op programmatically, the read op is 'Identity' type.
if consumer.inputs[constants.BIAS_ADD_CONSUMERS_INPUT_BIAS_READ_INDEX].op.type in \
['ReadVariableOp', 'Identity', 'QcQuantize']:
is_bias_none = False
break
return is_bias_none
@staticmethod
    def get_shape(input_op: tf.Operation) -> List:
"""
Returns the shape of the bias in the op specified
:param input_op: tf operation type (conv)
:return: shape of bias as a List type
"""
# infer bias shape from weight kernel shape
return [BiasUtils._get_bias_shape_from_weights(input_op)]
@staticmethod
def get_bias_read_op(input_op: tf.Operation) -> tf.Operation:
"""
Returns the read op associated with the bias in given op
:param input_op: operation for which the read op on bias is to be obtained
:return: read op associated with bias
"""
bias_read_op = None
bias_index = BiasUtils.get_bias_index_in_given_op(input_op)
if not BiasUtils.is_bias_none(input_op):
for consumer in input_op.outputs[bias_index].consumers():
# Ignore Reshape as it can be placed between MatMul and BiasAdd on Dense layer of Transformer
if consumer.type in ['Reshape'] and len(consumer.outputs[0].consumers()) == 1:
consumer = consumer.outputs[0].consumers()[0]
if consumer.type in ['Add', 'BiasAdd']:
assert len(consumer.inputs) == 2
# pick the bias ReadVariableOp type
bias_read_op = consumer.inputs[constants.BIAS_ADD_CONSUMERS_INPUT_BIAS_READ_INDEX].op
return bias_read_op
@staticmethod
def get_bias_tensor(op: tf.Operation):
"""
return bias in the op as a tf tensor type
bias_add op has two inputs : conv's output and a bias_read op
:param op: tf operation to extract bias from.
:return : bias as tf tensor type, if found in the given op
"""
# bias tensor feeds into biasadd op through ReadVariableOp type
# bias add inputs[1] is the bias tensor we want.
bias_tensor = None
bias_add = BiasUtils.get_bias_add_read_var_op_tensor(op)
if bias_add is not None:
assert len(bias_add.inputs) == 2
# input to a bias add op is bias
bias_tensor = bias_add.inputs[constants.BIAS_ADD_READ_VAR_OP_BIAS_TENSOR_INDEX]
return bias_tensor
@staticmethod
def get_bias_add_read_var_op_tensor(input_op: tf.Operation) -> tf.Operation:
"""
        Returns the bias-add op associated with the bias of the given op
        :param input_op: operation for which the bias add op is to be obtained
        :return: bias-add op associated with the bias in the given conv op
"""
bias_add_op = None
bias_index = BiasUtils.get_bias_index_in_given_op(input_op)
if not BiasUtils.is_bias_none(input_op):
for consumer in input_op.outputs[bias_index].consumers():
# Ignore Reshape as it can be placed between MatMul and BiasAdd on Dense layer of Transformer
if consumer.type in ['Reshape'] and len(consumer.outputs[0].consumers()) == 1:
consumer = consumer.outputs[0].consumers()[0]
if consumer.type in ['Add', 'BiasAdd']:
bias_add_op = consumer
return bias_add_op
@staticmethod
    def get_bias_as_numpy_data(sess: tf.compat.v1.Session, op: tf.Operation) -> np.array:
        """
        return the bias in the op as numpy data
        :param sess: TensorFlow session
        :param op: tf operation to extract the bias from.
        :return: bias tensor as numpy array type, if found in the given op
"""
# bias tensor feeds into bias-add op through ReadVariableOp type
# bias add inputs[1] is the bias tensor we want to read
bias_tensor = BiasUtils.get_bias_tensor(op)
assert bias_tensor is not None
numpy_data = sess.run(bias_tensor)
return numpy_data
@staticmethod
def update_bias_for_quantized_op(sess: tf.compat.v1.Session, op: tf.Operation, bias_as_numpy_array,
is_bias_none: bool = False):
"""
update existing bias in given op with new bias value
creates and adds new bias if it does not exist.
Note :
Caller needs to perform a load and save of the graph
if this api is invoked for an op without existing bias.
:param sess: TensorFlow session
:param op:op for which the bias is to be updated
:param bias_as_numpy_array: new bias as a numpy array
:param is_bias_none: True if Bias is None
:return: None
"""
with sess.graph.as_default():
if not is_bias_none:
bias_tensor_as_read_var_op_input = BiasUtils.get_bias_tensor(op)
assert len(bias_tensor_as_read_var_op_input.op.inputs) == 8
bias_add = bias_tensor_as_read_var_op_input.op.inputs[constants.OP_BIAS_INDICES[op.type]]
bias_tensor = bias_add.op.inputs[constants.OP_BIAS_INDICES[op.type]]
assert BiasUtils.get_shape(op)[0] == bias_as_numpy_array.size
# use tensor name to lookup var type associated with it
assert bias_tensor is not None, ('Error, bias tensor lookup failed for op ', op.name)
bias_as_var = [var for var in tf.compat.v1.global_variables() if var.name == bias_tensor.name][0]
bias_as_var.load(bias_as_numpy_array, sess)
@staticmethod
def update_bias_for_op(sess: tf.compat.v1.Session, op: tf.Operation, bias_as_numpy_array,
bias_name="bias_value"):
"""
update existing bias in given op with new bias value
creates and adds new bias if it does not exist.
Note :
Caller needs to perform a load and save of the graph
if this api is invoked for an op without existing bias.
:param sess: TensorFlow session
:param op:op for which the bias is to be updated
:param bias_as_numpy_array: new bias as a numpy array
:param bias_name: optional name can be specified by user
:return: None
"""
with sess.graph.as_default():
if not BiasUtils.is_bias_none(op):
bias_tensor_as_read_var_op_input = BiasUtils.get_bias_tensor(op)
assert len(bias_tensor_as_read_var_op_input.op.inputs) == 1
bias_tensor = bias_tensor_as_read_var_op_input.op.inputs[constants.OP_BIAS_INDICES[op.type]]
assert BiasUtils.get_shape(op)[0] == bias_as_numpy_array.size
# use tensor name to lookup var type associated with it
assert bias_tensor is not None, ('Error, bias tensor lookup failed for op ', op.name)
bias_as_var = [var for var in tf.compat.v1.global_variables() if var.name == bias_tensor.name][0]
bias_as_var.load(bias_as_numpy_array, sess)
else:
# _create_bias_add_op_and_insert
new_bias_var = tf.Variable(initial_value=bias_as_numpy_array, name=bias_name, dtype=tf.float32)
BiasUtils._create_bias_add_op_and_insert(sess, op, new_bias_var, bias_name)
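# --- Hedged usage sketch (illustrative, not part of this module's API): zero
# --- out the bias of a conv op, creating one if the op has no bias yet. Note
# --- the caller may still need the save/load step mentioned in the docstrings.
def _example_zero_out_bias(sess, conv_op):
    """Set the bias of `conv_op` to zeros via BiasUtils.update_bias_for_op."""
    bias_shape = BiasUtils.get_shape(conv_op)
    BiasUtils.update_bias_for_op(sess, conv_op, np.zeros(bias_shape, dtype=np.float32))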
def get_conv2d_op_params(op: tf.Operation) -> (Tuple, Tuple, Tuple):
"""
Get Conv2d op's parameters
:param op: TensorFlow Op
:return: (strides, padding, groups)
"""
strides = op.get_attr('strides')
data_format = op.get_attr('data_format')
padding = op.get_attr('padding')
if str(data_format.decode("utf-8")) == "NHWC":
strides = (strides[1], strides[2])
elif str(data_format.decode("utf-8")) == "NCHW":
strides = (strides[2], strides[3])
else:
raise ValueError("unknown data format")
# For Conv2D op groups should be 1
groups = 1
return strides, padding, groups
def get_strides_for_split_conv_ops(op: tf.Operation) -> (List, List):
"""
:param op: TensorFlow Op
:return: (conv_a_strides, conv_b_strides)
"""
if not op.type == 'Conv2D':
raise ValueError("Only Conv2d op can be split")
strides = op.get_attr("strides")
data_format = op.get_attr("data_format")
if str(data_format.decode("utf-8")) == "NHWC":
conv_a_strides = [strides[1], 1]
conv_b_strides = [1, strides[2]]
elif str(data_format.decode("utf-8")) == "NCHW":
conv_a_strides = [strides[2], 1]
conv_b_strides = [1, strides[3]]
else:
raise ValueError("Unknown data format!")
return conv_a_strides, conv_b_strides
def get_weight_shape(op: tf.Operation) -> List:
"""
Weight shape of an Op in Common format
Common format
Conv2D - [Noc, Nic, k_h, k_w]
MatMul - [Noc, Nic]
:param op: TensorFlow Op
:return: shape
"""
weight_index = WeightTensorUtils.get_tensor_index_in_given_op(input_op=op)
weight_shape = op.inputs[weight_index].get_shape().as_list()
# Conv2D weights are stored in the order [kh, kw, Nic, Noc] in TensorFlow
# Re-order them to the form [Noc, Nic, kh, kw]
if op.type == 'Conv2D':
weight_shape = [weight_shape[3], weight_shape[2], weight_shape[0], weight_shape[1]]
# FC weights are stored in order [Nic, Noc] in TensorFlow
# Re-order them to the form [Noc, Nic]
elif op.type == 'MatMul':
weight_shape = [weight_shape[1], weight_shape[0]]
else:
raise ValueError('op type not supported!')
return weight_shape
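# Worked example (hedged, for illustration): a TF Conv2D kernel stored as
# [kh, kw, Nic, Noc] = [3, 3, 64, 128] is reported by get_weight_shape() in the
# common layout [Noc, Nic, kh, kw] = [128, 64, 3, 3]; a MatMul weight stored as
# [Nic, Noc] = [512, 10] becomes [Noc, Nic] = [10, 512].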
def get_output_activation_shape(sess: tf.compat.v1.Session, op: tf.Operation, input_op_names: List[str],
input_shape: Union[Tuple, List[Tuple]]) -> List:
"""
Output activation shape in the Common format [NCHW]
:param sess: TensorFlow Session
:param op: TensorFlow op
:param input_op_names: list of input op names of model
:param input_shape: tuple or list of tuple of input shape of model
:return: output_shape in Common format [NCHW]
"""
if op.type == 'MatMul':
output_shape = get_matmul_activation_shape(op=op, input_activation=False)
elif op.type == 'Conv2D':
output_shape = get_conv2d_activation_shape(sess, op, input_op_names, input_shape, input_activation=False)
else:
raise ValueError("Op type is not supported!")
return output_shape
def get_conv2d_activation_shape(sess: tf.compat.v1.Session, op: tf.Operation, input_op_names: List[str],
input_shape: Union[Tuple, List[Tuple]], input_activation: bool) -> List:
"""
:param sess: TensorFlow Session
:param op: TensorFlow op
:param input_op_names: list of input op names of model
:param input_shape: tuple or list of tuple of input shape of model
:param input_activation: whether input / output activation shape
:return: List of input / output activation shape in Common format [NCHW]
"""
# use static shape for input / output activations
if input_activation:
activation_shape = op.inputs[0].get_shape().as_list()
else:
activation_shape = op.outputs[0].get_shape().as_list()
data_format = op.get_attr('data_format')
# convert input / output activation shape to Common format [NCHW], if channels_last
if str(data_format.decode("utf-8")) == "NHWC":
activation_shape = [activation_shape[0], activation_shape[3], activation_shape[1], activation_shape[2]]
# if the static shape is undefined, then find dynamic shape of input / output activations
if activation_shape[2] is None:
# get input data
input_data = create_rand_tensors_given_shapes(input_shape=input_shape)
# create feed_dict
feed_dict = create_input_feed_dict(graph=op.graph,
input_op_names_list=input_op_names,
input_data=input_data)
if input_activation:
# get the input activation shape by evaluating the input tensor
input_tensor = op.inputs[0]
activation_shape = input_tensor.eval(feed_dict=feed_dict, session=sess).shape
else:
# get the output activation shape by evaluating the output tensor
output_tensor = op.outputs[0]
activation_shape = output_tensor.eval(feed_dict=feed_dict, session=sess).shape
# convert output activation shape to Common format [NCHW], if channels_last
if str(data_format.decode("utf-8")) == "NHWC":
activation_shape = [activation_shape[0], activation_shape[3], activation_shape[1], activation_shape[2]]
return activation_shape
def get_matmul_activation_shape(op: tf.Operation, input_activation: bool) -> List:
"""
:param op: TensorFlow Operation
:param input_activation: whether input / output activation shape
:return: List activation shape [N, out_channels, 1, 1]
"""
assert op.type == 'MatMul'
# use static shape for output/input activations of matmul
if input_activation:
activation_shape = op.inputs[0].get_shape().as_list()
activation_shape.extend([1, 1])
return activation_shape
activation_shape = op.outputs[0].get_shape().as_list()
activation_shape.extend([1, 1])
return activation_shape
def get_layer_attributes(sess: tf.compat.v1.Session, op: tf.Operation, input_op_names: List[str],
input_shape: Union[Tuple, List[Tuple]]) -> (Tuple, Tuple, Tuple):
"""
Get attributes (kernel_size, stride, padding) of tf.nn.Conv2d Op
    :param sess: TensorFlow Session
    :param op: TensorFlow Operation
:param input_op_names: List of input op names of model
:param input_shape: tuple or list of tuple of input shape of model
:return: (kernel_size, stride, padding)
"""
# pylint: disable=too-many-locals
assert op.type == 'Conv2D'
stride = op.get_attr('strides')
data_format = op.get_attr('data_format')
output_activation_shape = get_conv2d_activation_shape(sess=sess, op=op, input_op_names=input_op_names,
input_shape=input_shape, input_activation=False)
input_activation_shape = get_conv2d_activation_shape(sess=sess, op=op, input_op_names=input_op_names,
input_shape=input_shape, input_activation=True)
_, _, activation_h, activation_w = output_activation_shape
output_shape = (activation_h, activation_w)
_, _, activation_h, activation_w = input_activation_shape
input_shape = (activation_h, activation_w)
# 'channels_last' format
if str(data_format.decode("utf-8")) == "NHWC":
stride = (int(stride[1]), int(stride[2]))
# 'channels_first' format
elif str(data_format.decode("utf-8")) == "NCHW":
stride = (int(stride[2]), int(stride[3]))
else:
raise ValueError("Unknown data format!")
# Conv2d weight shape in TensorFlow [kh, kw, Nic, Noc]
weight_index = WeightTensorUtils.get_tensor_index_in_given_op(input_op=op)
weight_shape = op.inputs[weight_index].shape
kernel_size = (int(weight_shape[0]), int(weight_shape[1]))
# get the padding for (height, width) dimension
padding = get_padding(input_shape=input_shape, output_shape=output_shape, kernel_size=kernel_size, stride=stride)
return kernel_size, stride, padding
def get_weight_tensor_with_shape(model: tf.compat.v1.Session, input_op: tf.Operation):
"""
generic function to extract weight tensor of a given conv/linear op
:param model: tf.compat.v1.Session type
:param input_op: input op as tf.Operation type
:return: weight and shape of tensor extracted from given op
"""
with model.graph.as_default():
weight_tensor = WeightTensorUtils.get_tensor_as_numpy_data(model, input_op)
# Conv2d weight shape in TensorFlow [kh, kw, Nic, Noc]
# re order in the common shape [Noc, Nic, kh, kw]
shape = WeightTensorUtils.get_tensor_shape(input_op)
wt_tensor = None
if input_op.type == 'DepthwiseConv2dNative':
# Depthwise conv layers in TF have outputs(Noc) set to 1.
# we will use format [Nic, Noc, kh, kw] -
# to be compatible with cpp backend.
wt_tensor = np.transpose(weight_tensor, (2, 3, 0, 1))
# [Nic, Noc, kh, kw]
shape = np.array([shape[2], shape[3], shape[0], shape[1]])
elif input_op.type == 'MatMul':
shape = np.concatenate((np.array([1, 1]), shape))
wt_tensor = np.transpose(weight_tensor, (1, 0))
# [Noc, Nic, kh, kw]
shape = np.array([shape[3], shape[2], shape[0], shape[1]])
elif input_op.type == 'Conv2D':
wt_tensor = np.transpose(weight_tensor, (3, 2, 0, 1))
# [Noc, Nic, kh, kw]
shape = np.array([shape[3], shape[2], shape[0], shape[1]])
else:
logger.error("_get_weight_tensor_transpose_reshape(): Operation type unsupported")
return wt_tensor, shape
| 42.761024
| 119
| 0.650943
|
b899ca78cd84558fbf3e5d8f498d70848bfc8f25
| 36,711
|
py
|
Python
|
drfact/run_drfact.py
|
alizaree/google-research
|
1969183efdb8fdaffa4ce05218a2c79ccfffe9a6
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:14:49.000Z
|
2022-03-30T07:14:49.000Z
|
drfact/run_drfact.py
|
alizaree/google-research
|
1969183efdb8fdaffa4ce05218a2c79ccfffe9a6
|
[
"Apache-2.0"
] | null | null | null |
drfact/run_drfact.py
|
alizaree/google-research
|
1969183efdb8fdaffa4ce05218a2c79ccfffe9a6
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Run training and evaluation for DrFact models."""
import collections
import functools
import json
import os
import re
import time
from absl import flags
from garcon.albert import tokenization as albert_tokenization
from bert import modeling
from bert import optimization
from bert import tokenization as bert_tokenization
from language.google.drfact import evaluate
from language.google.drfact import input_fns
from language.google.drfact import model_fns
from language.labs.drkit import search_utils
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import memory_stats as contrib_memory_stats
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("tokenizer_type", "bert_tokenization",
"The tokenizier type that the BERT model was trained on.")
flags.DEFINE_string("tokenizer_model_file", None,
"The tokenizier model that the BERT was trained with.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string(
"output_prediction_file", "test_predictions.json",
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None, "JSON for training.")
flags.DEFINE_string("predict_file", None, "JSON for predictions.")
flags.DEFINE_string("predict_prefix", "dev", "JSON for predictions.")
flags.DEFINE_string("test_file", None, "JSON for predictions.")
flags.DEFINE_string("data_type", "onehop",
"Whether queries are `onehop` or `twohop`.")
flags.DEFINE_string("model_type", "drkit",
"Whether to use `onehop` or `twohop` model.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("train_data_dir", None,
"Location of entity/mention/fact files for training data.")
flags.DEFINE_string("f2f_index_dir", None,
"Location of fact2fact files for training data.")
flags.DEFINE_string("test_data_dir", None,
"Location of entity/mention/fact files for test data.")
flags.DEFINE_string("model_ckpt_toload", "best_model",
"Name of the checkpoints.")
flags.DEFINE_string("test_model_ckpt", "best_model", "Name of the checkpoints.")
flags.DEFINE_string("embed_index_prefix", "bert_large", "Prefix of indexes.")
flags.DEFINE_integer("num_hops", 2, "Number of hops in rule template.")
flags.DEFINE_integer("max_entity_len", 4,
"Maximum number of tokens in an entity name.")
flags.DEFINE_integer(
"num_mips_neighbors", 100,
"Number of nearest neighbor mentions to retrieve for queries in each hop.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"projection_dim", None, "Number of dimensions to project embeddings to. "
"Set to None to use full dimensions.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool("do_test", False, "Whether to run eval on the test set.")
flags.DEFINE_float(
"subject_mention_probability", 0.0,
"Fraction of training instances for which we use subject "
"mentions in the text as opposed to canonical names.")
flags.DEFINE_integer("train_batch_size", 16, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 32,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 3e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 100,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 300,
"How many steps to make in each estimator call.")
flags.DEFINE_string("supervision", "entity",
"Type of supervision -- `mention` or `entity`.")
flags.DEFINE_float("entity_score_threshold", 1e-2,
"Minimum score of an entity to retrieve sparse neighbors.")
flags.DEFINE_float("fact_score_threshold", 1e-2,
"Minimum score of a fact to retrieve sparse neighbors.")
flags.DEFINE_float("softmax_temperature", 2.,
"Temperature before computing softmax.")
flags.DEFINE_string(
"sparse_reduce_fn", "max",
"Function to aggregate sparse search results for a set of "
"entities.")
flags.DEFINE_string("sparse_strategy", "dense_first",
"How to combine sparse and dense components.")
flags.DEFINE_boolean("intermediate_loss", False,
"Compute loss on intermediate layers.")
flags.DEFINE_boolean("light", False, "If true run in light mode.")
flags.DEFINE_boolean("is_excluding", False,
"If true exclude question and wrong choices' concepts.")
flags.DEFINE_string(
"qry_layers_to_use", "-1",
"Comma-separated list of layer representations to use as the fixed "
"query representation.")
flags.DEFINE_string(
"qry_aggregation_fn", "concat",
"Aggregation method for combining the outputs of layers specified using "
"`qry_layers`.")
flags.DEFINE_string(
"entity_score_aggregation_fn", "max",
"Aggregation method for combining the mention logits to entities.")
flags.DEFINE_float("question_dropout", 0.2,
"Dropout probability for question BiLSTMs.")
flags.DEFINE_integer("question_num_layers", 2,
"Number of layers for question BiLSTMs.")
flags.DEFINE_integer("num_preds", 100, "Use -1 for all predictions.")
flags.DEFINE_boolean(
"ensure_answer_sparse", False,
"If true, ensures answer is among sparse retrieval results"
"during training.")
flags.DEFINE_boolean(
"ensure_answer_dense", False,
"If true, ensures answer is among dense retrieval results "
"during training.")
flags.DEFINE_boolean(
"train_with_sparse", True,
"If true, multiplies logits with sparse retrieval results "
"during training.")
flags.DEFINE_boolean(
"predict_with_sparse", True,
"If true, multiplies logits with sparse retrieval results "
"during inference.")
flags.DEFINE_boolean("fix_sparse_to_one", True,
"If true, sparse search matrix is fixed to {0,1}.")
flags.DEFINE_boolean("l2_normalize_db", False,
"If true, pre-trained embeddings are normalized to 1.")
flags.DEFINE_boolean("load_only_bert", False,
"To load only BERT variables from init_checkpoint.")
flags.DEFINE_boolean(
"use_best_ckpt_for_predict", False,
"If True, loads the best_model checkpoint in model_dir, "
"instead of the latest one.")
flags.DEFINE_bool("profile_model", False, "Whether to run profiling.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_integer("random_seed", 1, "Random seed for reproducibility.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool("debug", False,
"If true, only print the flags but not run anything.")
class QAConfig(object):
"""Hyperparameters for the QA model."""
def __init__(self, qry_layers_to_use, qry_aggregation_fn, dropout,
qry_num_layers, projection_dim, num_entities, max_entity_len,
ensure_answer_sparse, ensure_answer_dense, train_with_sparse,
predict_with_sparse, fix_sparse_to_one, supervision,
l2_normalize_db, entity_score_aggregation_fn,
entity_score_threshold, fact_score_threshold,
softmax_temperature, sparse_reduce_fn, intermediate_loss,
train_batch_size, predict_batch_size, light, sparse_strategy,
load_only_bert):
self.qry_layers_to_use = [int(vv) for vv in qry_layers_to_use.split(",")]
self.qry_aggregation_fn = qry_aggregation_fn
self.dropout = dropout
self.qry_num_layers = qry_num_layers
self.projection_dim = projection_dim
self.num_entities = num_entities
self.max_entity_len = max_entity_len
self.load_only_bert = load_only_bert
self.ensure_answer_sparse = ensure_answer_sparse
self.ensure_answer_dense = ensure_answer_dense
self.train_with_sparse = train_with_sparse
self.predict_with_sparse = predict_with_sparse
self.fix_sparse_to_one = fix_sparse_to_one
self.supervision = supervision
self.l2_normalize_db = l2_normalize_db
self.entity_score_aggregation_fn = entity_score_aggregation_fn
self.entity_score_threshold = entity_score_threshold
self.fact_score_threshold = fact_score_threshold
self.softmax_temperature = softmax_temperature
self.sparse_reduce_fn = sparse_reduce_fn
self.intermediate_loss = intermediate_loss
self.train_batch_size = train_batch_size
self.predict_batch_size = predict_batch_size
self.light = light
self.sparse_strategy = sparse_strategy
class MIPSConfig(object):
"""Hyperparameters for the MIPS model of mention index."""
def __init__(self, ckpt_path, ckpt_var_name, num_mentions, emb_size,
num_neighbors):
self.ckpt_path = ckpt_path
self.ckpt_var_name = ckpt_var_name
self.num_mentions = num_mentions
self.emb_size = emb_size
self.num_neighbors = num_neighbors
class FactMIPSConfig(object):
"""Hyperparameters for the MIPS model of fact index."""
def __init__(self, ckpt_path, ckpt_var_name, num_facts, emb_size,
num_neighbors):
self.ckpt_path = ckpt_path
self.ckpt_var_name = ckpt_var_name
self.num_facts = num_facts
self.emb_size = emb_size
self.num_neighbors = num_neighbors
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu,
exclude_bert):
"""Creates an optimizer training op, optionally excluding BERT vars."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = ((1.0 - is_warmup) * learning_rate +
is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = optimization.AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf_estimator.tpu.CrossShardOptimizer(optimizer)
tvars = tf.trainable_variables()
if exclude_bert:
bert_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "bert")
tvars = [vv for vv in tvars if vv not in bert_vars]
tf.logging.info("Training the following variables:")
for vv in tvars:
tf.logging.info(vv.name)
grads = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
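# Hedged illustration of the schedule above in plain Python (no TF): with the
# defaults shown, the effective learning rate ramps up linearly over the warmup
# steps and then decays linearly (power=1 polynomial decay) to zero.
def _example_effective_lr(step, init_lr=3e-5, num_train_steps=1000,
                          num_warmup_steps=100):
  """Mirror the warmup + linear-decay schedule used in create_optimizer."""
  decayed = init_lr * (1.0 - min(step, num_train_steps) / float(num_train_steps))
  if step < num_warmup_steps:
    return init_lr * step / float(num_warmup_steps)
  return decayed
# e.g. _example_effective_lr(50) -> 1.5e-05, _example_effective_lr(550) -> 1.35e-05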
def get_assignment_map_from_checkpoint(tvars,
init_checkpoint,
load_only_bert=False):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
if load_only_bert and ("bert" not in name):
continue
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
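# Worked example (hedged, illustrative): for a trainable variable named
# "bert/embeddings/word_embeddings:0", the regex above strips the ":0" suffix,
# so a checkpoint entry "bert/embeddings/word_embeddings" maps onto it and is
# recorded in `initialized_variable_names` both with and without the ":0".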
def model_fn_builder(bert_config,
qa_config,
mips_config,
fact_mips_config,
init_checkpoint,
e2m_checkpoint,
m2e_checkpoint,
e2f_checkpoint,
f2e_checkpoint,
f2f_checkpoint,
entity_id_checkpoint,
entity_mask_checkpoint,
learning_rate,
num_train_steps,
num_warmup_steps,
use_tpu,
use_one_hot_embeddings,
create_model_fn,
summary_obj=None):
"""Returns `model_fn` closure for TPUEstimator."""
tf.random.set_random_seed(FLAGS.random_seed)
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
del labels, params # Not used.
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s", name, features[name].shape)
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
entity_ids = search_utils.load_database(
"entity_ids", [qa_config.num_entities, qa_config.max_entity_len],
entity_id_checkpoint,
dtype=tf.int32)
entity_mask = search_utils.load_database(
"entity_mask", [qa_config.num_entities, qa_config.max_entity_len],
entity_mask_checkpoint)
if FLAGS.model_type == "drkit":
# Initialize sparse tensor of ent2ment.
with tf.device("/cpu:0"):
tf_e2m_data, tf_e2m_indices, tf_e2m_rowsplits = (
search_utils.load_ragged_matrix("ent2ment", e2m_checkpoint))
with tf.name_scope("RaggedConstruction_e2m"):
e2m_ragged_ind = tf.RaggedTensor.from_row_splits(
values=tf_e2m_indices,
row_splits=tf_e2m_rowsplits,
validate=False)
e2m_ragged_val = tf.RaggedTensor.from_row_splits(
values=tf_e2m_data, row_splits=tf_e2m_rowsplits, validate=False)
tf_m2e_map = search_utils.load_database(
"coref", [mips_config.num_mentions], m2e_checkpoint, dtype=tf.int32)
total_loss, predictions = create_model_fn(
bert_config=bert_config,
qa_config=qa_config,
mips_config=mips_config,
is_training=is_training,
features=features,
ent2ment_ind=e2m_ragged_ind,
ent2ment_val=e2m_ragged_val,
ment2ent_map=tf_m2e_map,
entity_ids=entity_ids,
entity_mask=entity_mask,
use_one_hot_embeddings=use_one_hot_embeddings,
summary_obj=summary_obj,
num_preds=FLAGS.num_preds,
is_excluding=FLAGS.is_excluding,
)
elif FLAGS.model_type == "drfact":
# Initialize sparse tensor of ent2fact.
with tf.device("/cpu:0"): # Note: cpu or gpu?
tf_e2f_data, tf_e2f_indices, tf_e2f_rowsplits = (
search_utils.load_ragged_matrix("ent2fact", e2f_checkpoint))
with tf.name_scope("RaggedConstruction_e2f"):
e2f_ragged_ind = tf.RaggedTensor.from_row_splits(
values=tf_e2f_indices,
row_splits=tf_e2f_rowsplits,
validate=False)
e2f_ragged_val = tf.RaggedTensor.from_row_splits(
values=tf_e2f_data, row_splits=tf_e2f_rowsplits, validate=False)
# Initialize sparse tensor of fact2ent.
with tf.device("/cpu:0"):
tf_f2e_data, tf_f2e_indices, tf_f2e_rowsplits = (
search_utils.load_ragged_matrix("fact2ent", f2e_checkpoint))
with tf.name_scope("RaggedConstruction_f2e"):
f2e_ragged_ind = tf.RaggedTensor.from_row_splits(
values=tf_f2e_indices,
row_splits=tf_f2e_rowsplits,
validate=False)
f2e_ragged_val = tf.RaggedTensor.from_row_splits(
values=tf_f2e_data, row_splits=tf_f2e_rowsplits, validate=False)
# Initialize sparse tensor of fact2fact.
with tf.device("/cpu:0"):
tf_f2f_data, tf_f2f_indices, tf_f2f_rowsplits = (
search_utils.load_ragged_matrix("fact2fact", f2f_checkpoint))
with tf.name_scope("RaggedConstruction_f2f"):
f2f_ragged_ind = tf.RaggedTensor.from_row_splits(
values=tf_f2f_indices,
row_splits=tf_f2f_rowsplits,
validate=False)
f2f_ragged_val = tf.RaggedTensor.from_row_splits(
values=tf_f2f_data, row_splits=tf_f2f_rowsplits, validate=False)
total_loss, predictions = create_model_fn(
bert_config=bert_config,
qa_config=qa_config,
fact_mips_config=fact_mips_config,
is_training=is_training,
features=features,
ent2fact_ind=e2f_ragged_ind,
ent2fact_val=e2f_ragged_val,
fact2ent_ind=f2e_ragged_ind,
fact2ent_val=f2e_ragged_val,
fact2fact_ind=f2f_ragged_ind,
fact2fact_val=f2f_ragged_val,
entity_ids=entity_ids,
entity_mask=entity_mask,
use_one_hot_embeddings=use_one_hot_embeddings,
summary_obj=summary_obj,
num_preds=FLAGS.num_preds,
is_excluding=FLAGS.is_excluding,
)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map,
initialized_variable_names) = get_assignment_map_from_checkpoint(
tvars, init_checkpoint, load_only_bert=qa_config.load_only_bert)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf_estimator.ModeKeys.TRAIN:
one_mb = tf.constant(1024 * 1024, dtype=tf.int64)
devices = tf.config.experimental.list_logical_devices("GPU")
memory_footprints = []
for device in devices:
memory_footprint = tf.print(
device.name,
contrib_memory_stats.MaxBytesInUse() / one_mb, " / ",
contrib_memory_stats.BytesLimit() / one_mb)
memory_footprints.append(memory_footprint)
with tf.control_dependencies(memory_footprints):
train_op = create_optimizer(total_loss, learning_rate, num_train_steps,
num_warmup_steps, use_tpu, False)
output_spec = tf_estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf_estimator.ModeKeys.PREDICT:
output_spec = tf_estimator.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and PREDICT modes are supported: %s" %
(mode))
return output_spec
return model_fn
def train(dataset, estimator, num_train_steps):
"""Run one training loop over given TFRecords file."""
if FLAGS.profile_model:
hooks = [
tf.train.ProfilerHook(
output_dir=estimator.model_dir, save_secs=100, show_memory=False)
]
tf.logging.info("Saving profiling output to %s", estimator.model_dir)
else:
hooks = None
estimator.train(
input_fn=dataset.input_fn, max_steps=num_train_steps, hooks=hooks)
def single_eval(eval_dataset, estimator, ckpt_path, mention2text, entityid2name,
supervision, output_prediction_file, eval_fn, paragraphs,
mentions, **kwargs):
"""Run one evaluation using given checkpoint."""
del mentions # Not used.
tf.logging.info("***** Running predictions using %s *****", ckpt_path)
tf.logging.info(" Num eval examples = %d", len(eval_dataset.examples))
tf.logging.info(" Eval Batch size = %d", FLAGS.predict_batch_size)
# Collect ground truth answers.
if supervision == "mention":
name_map = mention2text
else:
name_map = entityid2name
# If running eval on the TPU, you will need to specify the number of
# steps.
all_results = []
for batched_result in estimator.predict(
eval_dataset.input_fn,
yield_single_examples=False,
checkpoint_path=ckpt_path):
if not all_results:
t_st = time.time()
# print("batched_result", batched_result) # Debug
cur_bsz = len(batched_result["qas_ids"])
for ii in range(cur_bsz):
result = {}
for r_key, values in batched_result.items():
result[r_key] = values[ii]
all_results.append(result)
if len(all_results) % 100 == 0:
tf.logging.info("Processing example: %d at single_eval",
len(all_results))
total_time = time.time() - t_st
# Compute metrics.
metrics = eval_fn(
eval_dataset,
all_results,
name_map,
output_prediction_file,
paragraphs,
supervision=supervision,
**kwargs)
metrics["QPS"] = float(len(all_results)) / total_time
return metrics
def _copy_model(in_path, out_path):
"""Copy model checkpoint for future use."""
tf.logging.info("Copying checkpoint from %s to %s.", in_path, out_path)
tf.gfile.Copy(
in_path + ".data-00000-of-00001",
out_path + ".data-00000-of-00001",
overwrite=True)
tf.gfile.Copy(in_path + ".index", out_path + ".index", overwrite=True)
tf.gfile.Copy(in_path + ".meta", out_path + ".meta", overwrite=True)
def continuous_eval(eval_dataset, estimator, mention2text, entityid2name,
supervision, eval_fn, paragraphs, mentions, **kwargs):
"""Run continuous evaluation on given TFRecords file."""
current_ckpt = 0
best_acc = 0
stop_evaluating = False
if not tf.gfile.Exists(os.path.join(FLAGS.output_dir, "eval")):
tf.gfile.MakeDirs(os.path.join(FLAGS.output_dir, "eval"))
event_writer = tf.summary.FileWriter(os.path.join(FLAGS.output_dir, "eval"))
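# The loop below either evaluates a single fixed checkpoint (FLAGS.model_ckpt_toload,
# when FLAGS.use_best_ckpt_for_predict is set) or keeps polling FLAGS.output_dir for a
# new checkpoint, sleeping 10 seconds between polls, evaluating each new checkpoint and
# copying it to "best_model" whenever its accuracy improves on the best seen so far.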
while not stop_evaluating:
if FLAGS.use_best_ckpt_for_predict:
ckpt_path = os.path.join(FLAGS.output_dir, FLAGS.model_ckpt_toload)
if not tf.gfile.Exists(ckpt_path + ".meta"):
tf.logging.info("No best_model checkpoint found in %s",
FLAGS.output_dir)
tf.logging.info("Skipping evaluation.")
break
output_prediction_file = os.path.join(
FLAGS.output_dir, "%s.predictions.json" % FLAGS.predict_prefix)
stop_evaluating = True
else:
ckpt_path = tf.train.latest_checkpoint(FLAGS.output_dir)
if ckpt_path == current_ckpt:
tf.logging.info("No new checkpoint in %s", FLAGS.output_dir)
tf.logging.info("Waiting for 10s")
time.sleep(10)
continue
current_ckpt = ckpt_path
model_name = None
if ckpt_path is not None:
model_name = os.path.basename(ckpt_path)
output_prediction_file = os.path.join(FLAGS.output_dir,
"predictions_%s.json" % model_name)
metrics = single_eval(eval_dataset, estimator, ckpt_path, mention2text,
entityid2name, supervision, output_prediction_file,
eval_fn, paragraphs, mentions, **kwargs)
tf.logging.info("Previous best accuracy: %.4f", best_acc)
tf.logging.info("Current accuracy: %.4f", metrics["accuracy"])
if ckpt_path is not None and not FLAGS.use_best_ckpt_for_predict:
ckpt_number = int(ckpt_path.rsplit("-", 1)[1])
if metrics["accuracy"] > best_acc:
best_acc = metrics["accuracy"]
if tf.gfile.Exists(ckpt_path + ".meta"):
_copy_model(ckpt_path, os.path.join(FLAGS.output_dir, "best_model"))
else:
ckpt_number = 0
for metric, value in metrics.items():
tf.logging.info("%s: %.4f", metric, value)
if not FLAGS.use_best_ckpt_for_predict:
curr_summary = tf.Summary(value=[
tf.Summary.Value(tag=metric, simple_value=value),
])
event_writer.add_summary(curr_summary, global_step=ckpt_number)
def validate_flags_or_throw():
"""Validate the input FLAGS or throw an exception."""
if (not FLAGS.do_train and not FLAGS.do_predict and not FLAGS.do_test):
raise ValueError("At least one of `do_train`, `do_predict` or "
"`do_test` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.debug:
print(FLAGS)
return
# Decide data type.
if FLAGS.data_type == "hotpotqa":
dataset_class = input_fns.OpenCSRDataset
eval_fn = evaluate.opencsr_eval_fn
# Decide model type.
if FLAGS.model_type == "drkit":
create_model_fn = functools.partial(
model_fns.create_drkit_model, num_hops=FLAGS.num_hops)
elif FLAGS.model_type == "drfact":
create_model_fn = functools.partial(
model_fns.create_drfact_model, num_hops=FLAGS.num_hops)
else:
tf.logging.info("Wrong model_type...")
# Load BERT.
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
# Load mention and entity files.
mention2text = json.load(
tf.gfile.Open(os.path.join(FLAGS.train_data_dir, "mention2text.json")))
tf.logging.info("Loading metadata about entities and mentions...")
entity2id, entity2name = json.load(
tf.gfile.Open(os.path.join(FLAGS.train_data_dir, "entities.json")))
entityid2name = {str(i): entity2name[e] for e, i in entity2id.items()}
all_paragraphs = json.load(
tf.gfile.Open(os.path.join(FLAGS.train_data_dir, "subparas.json")))
all_mentions = np.load(
tf.gfile.Open(os.path.join(FLAGS.train_data_dir, "mentions.npy"), "rb"))
qa_config = QAConfig(
qry_layers_to_use=FLAGS.qry_layers_to_use,
qry_aggregation_fn=FLAGS.qry_aggregation_fn,
dropout=FLAGS.question_dropout,
qry_num_layers=FLAGS.question_num_layers,
projection_dim=FLAGS.projection_dim,
load_only_bert=FLAGS.load_only_bert,
num_entities=len(entity2id),
max_entity_len=FLAGS.max_entity_len,
ensure_answer_sparse=FLAGS.ensure_answer_sparse,
ensure_answer_dense=FLAGS.ensure_answer_dense,
train_with_sparse=FLAGS.train_with_sparse,
predict_with_sparse=FLAGS.predict_with_sparse,
fix_sparse_to_one=FLAGS.fix_sparse_to_one,
supervision=FLAGS.supervision,
l2_normalize_db=FLAGS.l2_normalize_db,
entity_score_aggregation_fn=FLAGS.entity_score_aggregation_fn,
entity_score_threshold=FLAGS.entity_score_threshold,
fact_score_threshold=FLAGS.fact_score_threshold,
softmax_temperature=FLAGS.softmax_temperature,
sparse_reduce_fn=FLAGS.sparse_reduce_fn,
intermediate_loss=FLAGS.intermediate_loss,
light=FLAGS.light,
sparse_strategy=FLAGS.sparse_strategy,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
mips_config = MIPSConfig(
ckpt_path=os.path.join(FLAGS.train_data_dir,
"%s_mention_feats" % FLAGS.embed_index_prefix),
ckpt_var_name="db_emb",
num_mentions=len(mention2text),
emb_size=FLAGS.projection_dim * 2,
num_neighbors=FLAGS.num_mips_neighbors)
fact_mips_config = FactMIPSConfig(
ckpt_path=os.path.join(FLAGS.train_data_dir,
"%s_fact_feats" % FLAGS.embed_index_prefix),
ckpt_var_name="fact_db_emb",
num_facts=len(all_paragraphs),
emb_size=FLAGS.projection_dim * 2,
num_neighbors=FLAGS.num_mips_neighbors)
validate_flags_or_throw()
tf.gfile.MakeDirs(FLAGS.output_dir)
# Save training flags.
if FLAGS.do_train:
json.dump(tf.app.flags.FLAGS.flag_values_dict(),
tf.gfile.Open(os.path.join(FLAGS.output_dir, "flags.json"), "w"))
# tokenizer = tokenization.FullTokenizer(
# vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
if FLAGS.tokenizer_type == "bert_tokenization":
tokenizer = bert_tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=True)
elif FLAGS.tokenizer_type == "albert_tokenization":
tokenizer = albert_tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file,
do_lower_case=False,
spm_model_file=FLAGS.tokenizer_model_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf_estimator.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf_estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=50,
tpu_config=tf_estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host),
session_config=tf.ConfigProto(log_device_placement=False))
num_train_steps = None
num_warmup_steps = None
if FLAGS.num_preds < 0:
FLAGS.num_preds = len(entity2id)
if FLAGS.do_train:
train_dataset = dataset_class(
in_file=FLAGS.train_file,
tokenizer=tokenizer,
subject_mention_probability=FLAGS.subject_mention_probability,
max_qry_length=FLAGS.max_query_length,
is_training=True,
entity2id=entity2id,
tfrecord_filename=os.path.join(FLAGS.output_dir, "train.tf_record"))
num_train_steps = int(train_dataset.num_examples / FLAGS.train_batch_size *
FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
if FLAGS.do_predict:
eval_dataset = dataset_class(
in_file=FLAGS.predict_file,
tokenizer=tokenizer,
subject_mention_probability=0.0,
max_qry_length=FLAGS.max_query_length,
is_training=False,
entity2id=entity2id,
tfrecord_filename=os.path.join(
FLAGS.output_dir, "eval.%s.tf_record" % FLAGS.predict_prefix))
qa_config.predict_batch_size = FLAGS.predict_batch_size
summary_obj = None
# summary_obj = summary.TPUSummary(FLAGS.output_dir,
# FLAGS.save_checkpoints_steps)
model_fn = model_fn_builder(
bert_config=bert_config,
qa_config=qa_config,
mips_config=mips_config,
fact_mips_config=fact_mips_config,
init_checkpoint=FLAGS.init_checkpoint,
e2m_checkpoint=os.path.join(FLAGS.train_data_dir, "ent2ment.npz"),
m2e_checkpoint=os.path.join(FLAGS.train_data_dir, "coref.npz"),
e2f_checkpoint=os.path.join(FLAGS.train_data_dir, "ent2fact.npz"),
f2e_checkpoint=os.path.join(FLAGS.train_data_dir, "fact_coref.npz"),
f2f_checkpoint=os.path.join(FLAGS.f2f_index_dir, "fact2fact.npz"),
entity_id_checkpoint=os.path.join(FLAGS.train_data_dir, "entity_ids"),
entity_mask_checkpoint=os.path.join(FLAGS.train_data_dir, "entity_mask"),
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
create_model_fn=create_model_fn,
summary_obj=summary_obj)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
if FLAGS.do_train or FLAGS.do_predict:
estimator = tf_estimator.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", train_dataset.num_examples)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train(train_dataset, estimator, num_train_steps)
if FLAGS.do_predict:
continuous_eval(
eval_dataset,
estimator,
mention2text,
entityid2name,
qa_config.supervision,
eval_fn,
paragraphs=all_paragraphs,
mentions=all_mentions)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| 37.924587
| 80
| 0.692953
|
9f60f73f3a0981f511924cd9a460decb5235effc
| 135
|
py
|
Python
|
vendor/PropertyKits/src/properties/app_utils.py
|
huangtao/cloud-test
|
8087b1337d47daab9eb39335ca6e286df0e4b4dc
|
[
"Apache-2.0"
] | 1
|
2018-09-12T15:43:32.000Z
|
2018-09-12T15:43:32.000Z
|
vendor/PropertyKits/src/properties/app_utils.py
|
huangtao/cloud-test
|
8087b1337d47daab9eb39335ca6e286df0e4b4dc
|
[
"Apache-2.0"
] | null | null | null |
vendor/PropertyKits/src/properties/app_utils.py
|
huangtao/cloud-test
|
8087b1337d47daab9eb39335ca6e286df0e4b4dc
|
[
"Apache-2.0"
] | 3
|
2018-09-12T15:43:33.000Z
|
2019-07-10T09:50:15.000Z
|
'''
Created on 2017-01-18
@author: AppleWang
'''
# App name, app package name, app PID, UID, phone model, Android OS version, phone RAM size, phone CPU model, test date
# Memory/CPU maximum, minimum, and average values
| 16.875
| 58
| 0.711111
|
4b8c6b9a9c2177e02b752d6223daafa6453cd4b5
| 551
|
py
|
Python
|
src/aihwkit/cloud/converter/definitions/__init__.py
|
todd-deshane/aihwkit
|
07269e29731f9a6482d25326400437f6bef2fc94
|
[
"Apache-2.0"
] | 133
|
2020-09-17T20:36:08.000Z
|
2022-03-21T12:15:40.000Z
|
src/aihwkit/cloud/converter/definitions/__init__.py
|
todd-deshane/aihwkit
|
07269e29731f9a6482d25326400437f6bef2fc94
|
[
"Apache-2.0"
] | 140
|
2020-09-21T12:16:55.000Z
|
2022-03-31T18:07:37.000Z
|
src/aihwkit/cloud/converter/definitions/__init__.py
|
todd-deshane/aihwkit
|
07269e29731f9a6482d25326400437f6bef2fc94
|
[
"Apache-2.0"
] | 53
|
2020-09-17T15:53:31.000Z
|
2022-03-30T12:22:04.000Z
|
# -*- coding: utf-8 -*-
# (C) Copyright 2020, 2021 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Protobuf definitions for the AIHW Composer API."""
| 39.357143
| 77
| 0.745917
|
6cf8e33452cd93d9a1474ed5f07547200c7c2b94
| 4,212
|
py
|
Python
|
python/cuml/dask/decomposition/base.py
|
teju85/cuml
|
91ddc9eb557d7dc948e8394755890ee4a3102efd
|
[
"Apache-2.0"
] | 1
|
2021-04-06T14:24:25.000Z
|
2021-04-06T14:24:25.000Z
|
python/cuml/dask/decomposition/base.py
|
teju85/cuml
|
91ddc9eb557d7dc948e8394755890ee4a3102efd
|
[
"Apache-2.0"
] | 1
|
2020-03-05T02:25:50.000Z
|
2020-03-05T02:25:50.000Z
|
python/cuml/dask/decomposition/base.py
|
teju85/cuml
|
91ddc9eb557d7dc948e8394755890ee4a3102efd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common import raise_exception_from_futures
from cuml.dask.common.comms import worker_state, CommsContext
from cuml.dask.common.input_utils import to_output
from cuml.dask.common.part_utils import flatten_grouped_results
from dask.distributed import wait
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.input_utils import DistributedDataHandler
import cuml.common.logger as logger
class BaseDecomposition(BaseEstimator):
def __init__(self, model_func, client=None, verbosity=logger.LEVEL_INFO,
**kwargs):
"""
Constructor for distributed decomposition model
"""
super(BaseDecomposition, self).__init__(client=client,
verbosity=verbosity,
**kwargs)
self._model_func = model_func
# define attributes to make sure they
# are available even on untrained object
self.local_model = None
self.components_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.singular_values_ = None
class DecompositionSyncFitMixin(object):
@staticmethod
def _func_fit(m, dfs, M, N, partsToRanks, rank, _transform):
return m.fit(dfs, M, N, partsToRanks, rank, _transform)
def _fit(self, X, _transform=False):
"""
Fit the model with X.
Parameters
----------
X : dask cuDF input
"""
n_cols = X.shape[1]
data = DistributedDataHandler.create(data=X, client=self.client)
self.datatype = data.datatype
comms = CommsContext(comms_p2p=False)
comms.init(workers=data.workers)
data.calculate_parts_to_sizes(comms)
total_rows = data.total_rows
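# One model replica is created per Dask worker below (keyed by the worker's rank in
# the comms session), and each replica is then fit on that worker's local partitions;
# ranks and partition sizes are coordinated through `comms`.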
models = dict([(data.worker_info[wf[0]]["rank"], self.client.submit(
self._create_model,
comms.sessionId,
self._model_func,
self.datatype,
**self.kwargs,
pure=False,
workers=[wf[0]]))
for idx, wf in enumerate(data.worker_to_parts.items())])
pca_fit = dict([(wf[0], self.client.submit(
DecompositionSyncFitMixin._func_fit,
models[data.worker_info[wf[0]]["rank"]],
wf[1],
total_rows, n_cols,
data.parts_to_sizes[data.worker_info[wf[0]]["rank"]],
data.worker_info[wf[0]]["rank"],
_transform,
pure=False,
workers=[wf[0]]))
for idx, wf in enumerate(data.worker_to_parts.items())])
wait(list(pca_fit.values()))
raise_exception_from_futures(list(pca_fit.values()))
comms.destroy()
self.local_model = list(models.values())[0].result()
self.components_ = self.local_model.components_
self.explained_variance_ = self.local_model.explained_variance_
self.explained_variance_ratio_ = \
self.local_model.explained_variance_ratio_
self.singular_values_ = self.local_model.singular_values_
if _transform:
out_futures = flatten_grouped_results(self.client,
data.gpu_futures,
pca_fit)
return to_output(out_futures, self.datatype)
return self
@staticmethod
def _create_model(sessionId, model_func, datatype, **kwargs):
handle = worker_state(sessionId)["handle"]
return model_func(handle, datatype, **kwargs)
| 32.651163
| 76
| 0.632004
|
e8d850673ae31ace7ee7bd3dc4b8d149879fa458
| 733
|
py
|
Python
|
figures/2C/panel.py
|
cravattlab/abbasov
|
86d8609f9cb1932855dae054936aeb8e5a534cea
|
[
"MIT"
] | 2
|
2021-02-17T01:21:47.000Z
|
2021-09-09T18:29:15.000Z
|
figures/2C/panel.py
|
cravattlab/abbasov
|
86d8609f9cb1932855dae054936aeb8e5a534cea
|
[
"MIT"
] | 1
|
2021-04-15T00:48:09.000Z
|
2021-04-16T18:33:08.000Z
|
figures/2C/panel.py
|
cravattlab/abbasov
|
86d8609f9cb1932855dae054936aeb8e5a534cea
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
df = pd.read_csv('../../peptides_chemotypes.tsv', sep='\t')
dup = df.copy()
dup.pop('description')
dup.pop('accession')
dup.pop('site')
df = df[~dup.duplicated()]
chemotypes = df.columns[7:]
print(chemotypes)
data = {}
for c in chemotypes:
data[c] = dict(unique=0, shared=0)
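# For each (deduplicated) peptide row: collect every chemotype whose value is >= 4.0;
# if exactly one chemotype passes the threshold it is counted as "unique" for that
# chemotype, otherwise each passing chemotype is counted as "shared".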
for idx, row in df.iterrows():
liganding_chemotypes = []
for c in chemotypes:
if row[c] >= 4.0:
liganding_chemotypes.append(c)
for c in liganding_chemotypes:
if len(liganding_chemotypes) == 1:
data[c]['unique'] = data[c]['unique'] + 1
else:
data[c]['shared'] = data[c]['shared'] + 1
df = pd.DataFrame(data).T
df.to_csv('figure 2c.csv')
| 24.433333
| 59
| 0.608458
|
f0878c25df07cc097cfa53ce051aff37e42ff06e
| 4,855
|
py
|
Python
|
CPR/balance_sheets.py
|
rsi-models/CPR
|
2c9e2eb36499e65facd2303f1189101cfd18b267
|
[
"MIT"
] | null | null | null |
CPR/balance_sheets.py
|
rsi-models/CPR
|
2c9e2eb36499e65facd2303f1189101cfd18b267
|
[
"MIT"
] | null | null | null |
CPR/balance_sheets.py
|
rsi-models/CPR
|
2c9e2eb36499e65facd2303f1189101cfd18b267
|
[
"MIT"
] | null | null | null |
from CPR import tools
from CPR import taxes
from CPR import simulator
def compute_bs_bef_ret(hh, year, common, prices):
"""
Function to compute the pre-retirement balance sheet.
Parameters
----------
hh: Hhold
household
year : int
year
common : Common
instance of the class Common
prices : Prices
instance of the class Prices
"""
nom = tools.create_nom(year, prices)
hh_srd = taxes.file_household(hh, year, common, prices)
for who, p in enumerate(hh.sp):
p.disp_inc_bef_ret = nom(hh_srd.sp[who].disp_inc)
compute_cons_bef_ret(hh, year, prices)
def compute_cons_bef_ret(hh, year, prices):
"""
Function to compute pre-retirement consumption.
Parameters
----------
hh: Hhold
household
year : int
year
prices : Prices
instance of the class Prices
"""
real = tools.create_real(year, prices)
hh.disp_inc_bef_ret = sum([p.disp_inc_bef_ret for p in hh.sp])
hh.debt_payments = sum([hh.debts[debt].payment for debt in hh.debts])
hh.cons_bef_ret_real = real(hh.disp_inc_bef_ret - hh.debt_payments)
def compute_bs_after_ret(hh, year, common, prices):
"""
Function to compute the post-retirement balance sheet.
Parameters
----------
hh: Hhold
household
year : int
year
common : Common
instance of the class Common
prices : Prices
instance of the class Prices
"""
nom, real = tools.create_nom_real(year, prices)
hh_tax = taxes.file_household(hh, year, common, prices)
for inc in ['fam_inc_tot', 'fam_after_tax_inc', 'fam_disp_inc']:
val = nom(getattr(hh_tax, inc))
setattr(hh, f'{inc}_after_ret', val)
taxes.get_gis_oas_allowances(hh, hh_tax, year, prices)
hh.debt_payments = sum([hh.debts[debt].payment for debt in hh.debts])
hh.cons_after_ret_real = real(hh.fam_disp_inc_after_ret - hh.debt_payments)
hh.cons_after_ret_real -= real(getattr(hh, 'imputed_rent', 0))
def add_output(hh, year, prices, key):
"""
Function to extract output variables.
Parameters
----------
hh: Hhold
household
year : int
year
prices : Prices
instance of the class Prices
key : str
before ("bef"), when first spouse retires ("part")
or after retirement ("aft")
"""
real = tools.create_real(year, prices)
for p in hh.sp:
hh.d_output[f'{p.who}wage_{key}'] = real(p.d_wages[year])
hh.d_output[f'{p.who}pension_{key}'] = real(p.pension)
for residence in hh.residences:
hh.d_output[f'{residence}_{key}'] = \
real(hh.residences[residence].balance)
business = real(hh.business.balance) if hasattr(hh, 'business') else 0
hh.d_output[f'business_{key}'] = business
if 'first_mortgage' in hh.debts:
hh.d_output[f'first_mortgage_balance_{key}'] = \
real(hh.debts['first_mortgage'].balance)
if key == 'bef':
hh.d_output[f'year_cons_bef'] = hh.cons_bef_ret_year
hh.d_output[f'cons_{key}'] = hh.cons_bef_ret_real
if key in ['bef', 'part']:
for p in hh.sp:
rpp_dc = real(p.rpp_dc.balance) if hasattr(p, 'rpp_dc') else 0
hh.d_output[f'{p.who}rpp_dc_{key}'] = rpp_dc
for acc in p.fin_assets:
hh.d_output[f'{p.who}{acc}_balance_{key}'] = \
real(p.fin_assets[acc].balance)
if key in ['part', 'after']:
for p in hh.sp:
hh.d_output[f'{p.who}annuity_rrsp_{key}'] = p.annuity_rrsp_real
hh.d_output[f'{p.who}annuity_rpp_dc_{key}'] = p.annuity_rpp_dc_real
hh.d_output[f'{p.who}annuity_non_rrsp_{key}'] = \
p.annuity_non_rrsp_real
if key == 'after':
hh.d_output[f'year_cons_after'] = hh.cons_after_ret_year
hh.d_output[f'imputed_rent_{key}'] = real(getattr(hh, 'imputed_rent', 0))
hh.d_output[f'cons_{key}'] = hh.cons_after_ret_real
hh.d_output[f'debt_payments_{key}'] = real(hh.debt_payments)
hh.d_output[f'fam_net_tax_liability_{key}'] = real(
hh.fam_inc_tot_after_ret - hh.fam_after_tax_inc_after_ret)
for p in hh.sp:
hh.d_output[f'{p.who}cpp_{key}'] = real(p.cpp)
hh.d_output[f'{p.who}gis_{key}'] = real(p.inc_gis)
hh.d_output[f'{p.who}oas_{key}'] = real(p.inc_oas)
hh.d_output[f'{p.who}allow_surv_{key}'] = real(p.allow_surv)
hh.d_output[f'{p.who}allow_couple_{key}'] = real(p.allow_couple)
db_benefits = real(p.rpp_db.benefits) if hasattr(p, 'rpp_db') else 0
hh.d_output[f'{p.who}rpp_db_benefits_{key}'] = db_benefits
hh.d_output[f'{p.who}business_dividends_{key}'] = real(getattr(p, 'div_other_can', 0))
| 33.482759
| 98
| 0.617302
|
a9393f2df5647c15052254298b42055f08250722
| 1,876
|
py
|
Python
|
tool/eval_para_classification.py
|
SeonjeongHwang/xlnet_cqa
|
2bbc5afa03480fd507db062901b86d1274ef36d5
|
[
"Apache-2.0"
] | null | null | null |
tool/eval_para_classification.py
|
SeonjeongHwang/xlnet_cqa
|
2bbc5afa03480fd507db062901b86d1274ef36d5
|
[
"Apache-2.0"
] | null | null | null |
tool/eval_para_classification.py
|
SeonjeongHwang/xlnet_cqa
|
2bbc5afa03480fd507db062901b86d1274ef36d5
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.metrics import f1_score, accuracy_score, recall_score
import argparse
import sys
import json
def parse_args():
parser = argparse.ArgumentParser('Evaluation script for paraphrasing classification')
parser.add_argument('--data-file', dest="data_file", help='Input data JSON file.')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main(file):
y_true = []
y_pred = []
zero_true = []
zero_pred = []
one_true = []
one_pred = []
true_1 = 0
true_0 = 0
with open(file, "r") as f:
lines = json.load(f)
for line in lines:
y_true.append(line["label"])
if line["label"] == 1:
true_1 += 1
else:
true_0 += 1
y_pred.append(line["predicted_label"])
if line["label"] == 0:
zero_true.append(line["label"])
zero_pred.append(line["predicted_label"])
else:
one_true.append(line["label"])
one_pred.append(line["predicted_label"])
binary = f1_score(y_true, y_pred)
macro = f1_score(y_true, y_pred, average='macro')
acc = accuracy_score(y_true, y_pred)
zero_acc = accuracy_score(zero_true, zero_pred)
one_acc = accuracy_score(one_true, one_pred)
zero_recall = recall_score(y_true, y_pred, pos_label=0)
one_recall = recall_score(y_true, y_pred, pos_label=1)
print("label 1:", true_1)
print("label 0:", true_0)
print("binary:", binary)
print("macro:", macro)
print("acc:", acc)
print("zero:", zero_acc)
print("one:", one_acc)
print("zero recall:", zero_recall)
print("one: recall", one_recall)
if __name__ == '__main__':
args = parse_args()
main(args.data_file)
| 27.188406
| 89
| 0.58049
|
14226b4f70b68ad17a31bed860cc6e8506fdfc22
| 1,081
|
py
|
Python
|
plugins/weebify.py
|
xditya/PikaBotPlugins
|
2c5c52716158cd8964220bcc71fa383ccaf1210a
|
[
"Apache-2.0"
] | 2
|
2021-02-16T05:35:41.000Z
|
2021-05-25T16:59:47.000Z
|
plugins/weebify.py
|
xditya/PikaBotPlugins
|
2c5c52716158cd8964220bcc71fa383ccaf1210a
|
[
"Apache-2.0"
] | null | null | null |
plugins/weebify.py
|
xditya/PikaBotPlugins
|
2c5c52716158cd8964220bcc71fa383ccaf1210a
|
[
"Apache-2.0"
] | 2
|
2021-02-07T03:09:40.000Z
|
2021-05-25T16:59:59.000Z
|
""" Weebify a text,
Ported from Saitama Bot.
By :- @PhycoNinja13b
Modified by :- @kirito6969
.weeb <text> """
from telethon import events
from uniborg.util import ItzSjDude
normiefont = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z']
weebyfont = ['卂', '乃', '匚', '刀', '乇', '下', '厶', '卄', '工', '丁', '长', '乚', '从', '𠘨', '口', '尸', '㔿', '尺', '丂', '丅', '凵',
'リ', '山', '乂', '丫', '乙']
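# For illustration (using the mapping above): ".weeb hello" first spaces out the
# lowercase text to "h e l l o" and then maps each letter, producing "卄 乇 乚 乚 口".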
@ItzSjDude(outgoing=True, pattern="weeb ?(.*)")
async def weebify(event):
args = event.pattern_match.group(1)
if not args:
get = await event.get_reply_message()
args = get.text
if not args:
await event.edit("`What I am Supposed to Weebify U Dumb`")
return
string = ' '.join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
weebycharacter = weebyfont[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, weebycharacter)
await event.edit(string)
| 34.870968
| 118
| 0.539315
|
96395521ec6a85925c1d76ec243a00921f1cd5f7
| 2,258
|
py
|
Python
|
rllab/envs/mujoco/swimmer_env.py
|
RussellM2020/maml_gps
|
631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5
|
[
"MIT"
] | 541
|
2017-07-19T00:49:13.000Z
|
2022-03-28T21:14:23.000Z
|
rllab/envs/mujoco/swimmer_env.py
|
RussellM2020/maml_gps
|
631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5
|
[
"MIT"
] | 13
|
2018-02-28T02:29:58.000Z
|
2021-03-21T13:49:49.000Z
|
rllab/envs/mujoco/swimmer_env.py
|
RussellM2020/maml_gps
|
631560dfd4e23dc2da9bfbbd2e3c5252aa9775c5
|
[
"MIT"
] | 168
|
2017-07-19T12:21:01.000Z
|
2022-02-22T00:46:40.000Z
|
from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from .mujoco_env import MujocoEnv
import numpy as np
from rllab.core.serializable import Serializable
from rllab.misc import logger
from rllab.misc import autoargs
class SwimmerEnv(MujocoEnv, Serializable):
FILE = 'swimmer.xml'
@autoargs.arg('ctrl_cost_coeff', type=float,
help='cost coefficient for controls')
def __init__(
self,
ctrl_cost_coeff=1e-2,
*args, **kwargs):
self.ctrl_cost_coeff = ctrl_cost_coeff
super(SwimmerEnv, self).__init__(*args, **kwargs)
Serializable.quick_init(self, locals())
def get_current_obs(self):
return np.concatenate([
self.model.data.qpos.flat,
self.model.data.qvel.flat,
self.get_body_com("torso").flat,
]).reshape(-1)
def step(self, action):
self.forward_dynamics(action)
next_obs = self.get_current_obs()
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(
np.square(action / scaling))
forward_reward = self.get_body_comvel("torso")[0]
#forward_reward = -1.5*np.abs(self.get_body_comvel("torso")[0] - 0.15)
# max achievable vel is around 0.20 for vpg.
reward = forward_reward - ctrl_cost
done = False
return Step(next_obs, reward, done)
@overrides
def log_diagnostics(self, paths, prefix=''):
progs = [
path["observations"][-1][-3] - path["observations"][0][-3]
for path in paths
]
#if np.mean(progs) > 4.5:
# import pdb; pdb.set_trace()
#path = paths[0]
#t = -10
#lb, ub = self.action_bounds
#scaling = (ub - lb) * 0.5
#rew = path['rewards'][t]
#act = path['actions'][t]
#ctrl_cost = 0.5*self.ctrl_cost_coeff*np.sum(np.square(act/scaling))
logger.record_tabular('AverageForwardProgress', np.mean(progs))
logger.record_tabular('MaxForwardProgress', np.max(progs))
logger.record_tabular('MinForwardProgress', np.min(progs))
logger.record_tabular('StdForwardProgress', np.std(progs))
| 34.738462
| 78
| 0.61426
|
c81e13ca519f4e162afb1623ed39ba5aeac9bb05
| 2,866
|
py
|
Python
|
models/radom_forest_regression.py
|
tcsong456/Amazon_employee_access
|
75bdbde6d8f0434bc104aab8715fb564380e3c8a
|
[
"MIT"
] | null | null | null |
models/radom_forest_regression.py
|
tcsong456/Amazon_employee_access
|
75bdbde6d8f0434bc104aab8715fb564380e3c8a
|
[
"MIT"
] | null | null | null |
models/radom_forest_regression.py
|
tcsong456/Amazon_employee_access
|
75bdbde6d8f0434bc104aab8715fb564380e3c8a
|
[
"MIT"
] | null | null | null |
from models import register_model,register_model_architecture
from sklearn.ensemble import RandomForestRegressor
@register_model('random_forest')
class RFRegressor:
def __init__(self,args):
model = RandomForestRegressor(max_depth=args.max_depth,
min_samples_leaf=args.min_samples_leaf,
max_samples=args.max_samples,
max_features=args.max_features,
n_jobs=args.n_jobs,
bootstrap=args.bootstrap)
self.model = model
@staticmethod
def add_args(parser):
parser.add_argument('--max_depth',
help='The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until \
all leaves contain less than min_samples_split samples')
parser.add_argument('--min_samples_leaf',
help='The minimum number of samples required to be at a leaf node. A split point at any depth will only be considered \
if it leaves at least min_samples_leaf training samples in each of the left and right branches')
parser.add_argument('--max_samples',
help='If bootstrap is True, the number of samples to draw from X to train each base estimator')
parser.add_argument('--max_features',
help="The number of features to consider when looking for the best split")
parser.add_argument('--n_jobs',
help='The number of jobs to run in parallel. fit, predict, decision_path and apply are all parallelized over the trees.\
None means 1 unless in a joblib.parallel_backend context. -1 means using all processors. See Glossary for more details')
parser.add_argument('--bootstrap',
help='Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree')
@classmethod
def build_model(cls,args):
return cls(args)
def fit(self,X,y):
self.model.fit(X,y)
return self
def predict(self,X):
return self.model.predict(X)
def get_params(self):
return self.model.get_params()
@register_model_architecture('random_forest','rf_normal')
def rf_run(args):
args.max_depth = getattr(args,'max_depth',35)
args.min_samples_leaf = getattr(args,'min_samples_leaf',5)
args.max_samples = getattr(args,'max_samples',0.8)
args.max_features = getattr(args,'max_features',0.1)
args.n_jobs = getattr(args,'n_jobs',6)
args.bootstrap = getattr(args,'bootstrap',True)
return args
| 52.109091
| 149
| 0.606071
|
22f22f44258b6752c8443943815453f5810b1500
| 6,130
|
py
|
Python
|
lcip/libvirt.py
|
andrekeller/lcip
|
94147b24694e95d6c539857bddb4e0772d9a5cfd
|
[
"MIT"
] | null | null | null |
lcip/libvirt.py
|
andrekeller/lcip
|
94147b24694e95d6c539857bddb4e0772d9a5cfd
|
[
"MIT"
] | null | null | null |
lcip/libvirt.py
|
andrekeller/lcip
|
94147b24694e95d6c539857bddb4e0772d9a5cfd
|
[
"MIT"
] | null | null | null |
"""lcip - libvirt api"""
# stdlib
from contextlib import suppress
from functools import partial
from pathlib import Path
from xml.dom import minidom
import xml.etree.ElementTree as ElementTree
# 3rd-party
import libvirt
# lcip
from lcip.defaults import LIBVIRT_POOL_DIR
class Libvirt:
"""Libvirt interface"""
def __init__(self, pool, libvirt_url=None):
"""initialize connection to libvirt"""
self._connection = libvirt.open(libvirt_url) # pylint: disable=maybe-no-member
self._pool = self._connection.storagePoolLookupByName(pool)
if self._pool is None:
raise RuntimeError(f'libvirt storage pool {pool} not found')
def pool_refresh(self):
"""refresh storage pool"""
self._pool.refresh()
def define(self, xml):
"""define libvirt resource from xml"""
self._connection.defineXML(xml)
def start(self, name):
"""enable autostart and start domain"""
domain = self._connection.lookupByName(name)
domain.setAutostart(True)
domain.create()
def __del__(self):
"""ensure connection to libvirt is closed"""
if self._connection:
with suppress(libvirt.libvirtError): # pylint: disable=maybe-no-member
self._connection.close()
class LibvirtDomain:
"""Libvirt domain definition"""
def __init__(self, vmdefinition):
"""intialize new domain definition"""
self.vmdefinition = vmdefinition
# pylint: disable=too-many-arguments
def _add_element(self, parent, name, attributes=None, text=None, children=None):
"""simplified interface for adding xml elements"""
element = ElementTree.Element(name)
if attributes:
for attribute, value in attributes.items():
element.attrib[attribute] = value
if text:
element.text = text
if children:
for child, params in children.items():
self._add_element(element,
child,
text=params.get('text', None),
attributes=params.get('attributes', None),
children=params.get('children', None))
parent.append(element)
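# For illustration: a call such as
# self._add_element(parent, 'vlan', children={'tag': {'attributes': {'id': '100'}}})
# appends an element equivalent to <vlan><tag id="100"/></vlan> to `parent`
# (see the interface definition below for the real usage).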
def _interface(self, bridge, vlan=None):
"""create libvirt interface definition"""
interface = ElementTree.Element('interface')
interface.attrib['type'] = 'bridge'
add_to_interface = partial(self._add_element, interface)
add_to_interface('source', attributes={'bridge': bridge})
add_to_interface('model', attributes={'type': 'virtio'})
add_to_interface('virtualport', attributes={'type': 'openvswitch'})
if vlan:
add_to_interface('vlan', children={'tag': {'attributes': {'id': str(vlan)}}})
return interface
def _disk(self, source, dev, driver='qcow2', readonly=False):
"""create libvirt disk definition"""
disk = ElementTree.Element('disk')
disk.attrib['type'] = 'file'
disk.attrib['device'] = 'disk'
add_to_disk = partial(self._add_element, disk)
add_to_disk('driver', attributes={'name': 'qemu', 'type': driver})
add_to_disk('source', attributes={'file': source})
add_to_disk('target', attributes={'dev': dev, 'bus': 'virtio'})
if readonly:
add_to_disk('readonly')
return disk
@property
def image(self):
"""path to domains root disk"""
return Path(LIBVIRT_POOL_DIR, f'{self.vmdefinition["fqdn"]}-root.qcow2')
@property
def seed(self):
"""path to domains seed image"""
return Path(LIBVIRT_POOL_DIR, f'{self.vmdefinition["fqdn"]}-seed.iso')
@property
def xml(self):
"""xml representation of libvirt domain"""
xml = ElementTree.Element('domain')
xml.attrib['type'] = 'kvm'
add_to_domain = partial(self._add_element, xml)
add_to_domain('name', text=self.vmdefinition['fqdn'])
add_to_domain('on_crash', text='destroy')
add_to_domain('on_poweroff', text='destroy')
add_to_domain('on_reboot', text='restart')
add_to_domain('vcpu', text=str(self.vmdefinition['cpu']))
add_to_domain('memory', attributes={'unit': 'MiB'}, text=str(self.vmdefinition['memory']))
add_to_domain('memoryBacking', children={'hugepages': {}})
add_to_domain('os', children={
'boot': {'attributes': {'dev': 'hd'}},
'type': {'text': 'hvm', 'attributes': {'arch': 'x86_64', 'machine': 'pc'}},
})
add_to_domain('features', children={
'acpi': {},
'apic': {},
})
add_to_domain('clock', attributes={'offset': 'utc'})
devices = ElementTree.Element('devices')
add_to_devices = partial(self._add_element, devices)
add_to_devices('emulator', text='/usr/bin/qemu-system-x86_64')
add_to_devices('console', attributes={'type': 'pty'})
add_to_devices('input', attributes={'type': 'keyboard', 'bus': 'ps2'})
add_to_devices(
'graphics',
attributes={'type': 'spice', 'port': '-1', 'tlsPort': '-1', 'autoport': 'yes'},
children={'image': {'attributes': {'compression': 'off'}}},
)
add_to_devices(
'video',
children={'model': {'attributes': {'type': 'virtio'}}},
)
devices.append(self._interface(
self.vmdefinition['network']['ovs_bridge'],
self.vmdefinition['network'].get('ovs_vlan', None),
))
devices.append(self._disk(
source=str(self.image),
dev='vda',
))
devices.append(self._disk(
source=str(self.seed),
dev='vdb',
driver='raw',
readonly=True,
))
add_to_devices('memballoon', attributes={'model': 'virtio'})
xml.append(devices)
return minidom.parseString(
ElementTree.tostring(xml, encoding='utf8')
).toprettyxml(indent=' ')
| 34.632768
| 98
| 0.589723
|
e6842e4dd150207fe88f6ed696a25bb025ce24a0
| 2,301
|
py
|
Python
|
deepr/config/references.py
|
drohde/deepr
|
672772ea3ce9cf391f9f8efc7ae9c9d438957817
|
[
"Apache-2.0"
] | 50
|
2020-05-19T17:29:44.000Z
|
2022-01-15T20:50:50.000Z
|
deepr/config/references.py
|
drohde/deepr
|
672772ea3ce9cf391f9f8efc7ae9c9d438957817
|
[
"Apache-2.0"
] | 75
|
2020-05-20T16:53:37.000Z
|
2022-01-12T15:53:46.000Z
|
deepr/config/references.py
|
drohde/deepr
|
672772ea3ce9cf391f9f8efc7ae9c9d438957817
|
[
"Apache-2.0"
] | 17
|
2020-05-25T13:23:03.000Z
|
2022-02-21T11:22:08.000Z
|
"""Helpers for references"""
import logging
from typing import Dict, Any
LOGGER = logging.getLogger(__name__)
REF = "@"
REF_SELF = "@self"
REF_MACROS = "@macros"
REF_MACROS_EVAL = "@macros_eval"
def fill_references(item: Any, references: Dict[str, Any] = None) -> Any:
"""Fill all params that are references, fail if not found.
Returns a new dictionary, tuple or list or item depending on item's
type.
Parameters that use the ref syntax "@reference" are replaced by the
relevant entry from references (`references['@reference']`).
If a reference is not found in `references`, raise `ValueError`
Parameters
----------
item : Any
Any item, but typically a Dict
references : Dict[str, Any], optional
Mapping of names to reference objects
Returns
-------
Any
Raises
------
ValueError
If some references are not found
"""
if isreference(item):
if references is None:
raise ValueError(f"Found reference {item} but references is None.")
if item not in references:
raise ValueError(f"Found reference {item} not in references")
return references[item]
if isinstance(item, dict):
return {key: fill_references(value, references) for key, value in item.items()}
if isinstance(item, list):
return [fill_references(it, references) for it in item]
if isinstance(item, tuple):
return tuple(fill_references(it, references) for it in item)
return item
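# For illustration: with references = {"@data": [1, 2, 3]}, the call
# fill_references({"x": "@data", "y": 0}, references) returns
# {"x": [1, 2, 3], "y": 0}, while an unknown "@other" raises ValueError.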
def isreference(item) -> bool:
"""True if item is a string that looks like '@reference'."""
return isinstance(item, str) and item.startswith(REF)
def default_references(config: Dict, macros: Dict = None, macros_eval: Dict = None) -> Dict[str, Any]:
"""Create default references from config, macros and macros_eval.
Evaluation mode for the default references is set to "skip" to avoid
double evaluation of those nested references.
"""
config = {**config, "eval": None} if config is not None else None
macros = {**macros, "eval": None} if macros is not None else None
macros_eval = {**macros_eval, "eval": None} if macros_eval is not None else None
return {REF_SELF: config, REF_MACROS: macros, REF_MACROS_EVAL: macros_eval}
| 30.68
| 102
| 0.670143
|
25789f3f7cf091219529d281d3e7297b0c8669c0
| 311
|
py
|
Python
|
project/core/prod_settings.py
|
milkOSTpyt/Goods-accounting-system
|
ec223ed726cfa9f18d49c4233f9dc1520373c874
|
[
"BSD-3-Clause"
] | null | null | null |
project/core/prod_settings.py
|
milkOSTpyt/Goods-accounting-system
|
ec223ed726cfa9f18d49c4233f9dc1520373c874
|
[
"BSD-3-Clause"
] | null | null | null |
project/core/prod_settings.py
|
milkOSTpyt/Goods-accounting-system
|
ec223ed726cfa9f18d49c4233f9dc1520373c874
|
[
"BSD-3-Clause"
] | null | null | null |
SECRET_KEY = 'b#^di6u0zal6^2gr!aa3bje#z%5123!mya@)&3p-21d(=sj5%_y9n32122'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'gasdb',
'USER': 'artem',
'PASSWORD': 'gastest911',
'HOST': 'localhost',
'PORT': '5432',
}
}
| 23.923077
| 73
| 0.540193
|
b81d716c43b6f9a65d82ec101bcba66515329a37
| 6,766
|
py
|
Python
|
sc2/game_state.py
|
drakonnan1st/JackBot
|
345df784098cb9eb055b3901fe7455807c58a4e1
|
[
"MIT"
] | null | null | null |
sc2/game_state.py
|
drakonnan1st/JackBot
|
345df784098cb9eb055b3901fe7455807c58a4e1
|
[
"MIT"
] | null | null | null |
sc2/game_state.py
|
drakonnan1st/JackBot
|
345df784098cb9eb055b3901fe7455807c58a4e1
|
[
"MIT"
] | null | null | null |
"""Groups some info about the all global(shared by all races) state of sc2 so it can be used in an easy way"""
from typing import List, Set
from .data import ALLIANCE, DISPLAY_TYPE
from .ids.effect_id import EffectId
from .ids.upgrade_id import UpgradeId
from .pixel_map import PixelMap
from .position import Point2, Point3
from .power_source import PsionicMatrix
from .score import ScoreDetails
from .units import Units
from sc2.constants import UnitTypeId
class Blip:
"""Identifies and categorize the clocked units"""
def __init__(self, proto):
self.proto = proto
@property
def is_blip(self) -> bool:
"""Detected by sensor tower."""
return self.proto.is_blip
@property
def is_snapshot(self) -> bool:
"""Detected for just a small moment(f.e tanks that shoot you on the high ground)"""
return self.proto.display_type == DISPLAY_TYPE.Snapshot.value
@property
def is_visible(self) -> bool:
"""Detected- its outside the fog of war"""
return self.proto.display_type == DISPLAY_TYPE.Visible.value
@property
def alliance(self) -> ALLIANCE:
"""Its an ally's unit"""
return self.proto.alliance
@property
def is_mine(self) -> bool:
"""Its a bot's unit"""
return self.proto.alliance == ALLIANCE.Self.value
@property
def is_enemy(self) -> bool:
"""Its an enemy unit"""
return self.proto.alliance == ALLIANCE.Enemy.value
@property
def position(self) -> Point2:
"""2d position of the blip."""
return self.position3d.to2
@property
def position3d(self) -> Point3:
"""3d position of the blip."""
return Point3.from_proto(self.proto.pos)
class Common:
"""Groups every common attributes for every race"""
ATTRIBUTES = [
"player_id",
"minerals",
"vespene",
"food_cap",
"food_used",
"food_army",
"food_workers",
"idle_worker_count",
"army_count",
"warp_gate_count",
"larva_count",
]
def __init__(self, proto):
self.proto = proto
def __getattr__(self, attr):
assert attr in self.ATTRIBUTES, f"'{attr}' is not a valid attribute"
return int(getattr(self.proto, attr))
class EffectData:
"""Group all effects and its position"""
def __init__(self, proto):
self.proto = proto
@property
def id(self) -> EffectId:
"""Get the id of the effect"""
return EffectId(self.proto.effect_id)
@property
def positions(self) -> List[Point2]:
"""List all positions that are targets by the effect"""
return [Point2.from_proto(p) for p in self.proto.pos]
class GameState:
"""Groups most useful info about the game state"""
def __init__(self, response_observation, game_data):
self.actions = response_observation.actions
self.action_errors = response_observation.action_errors
self.observation = response_observation.observation
self.player_result = response_observation.player_result
self.chat = response_observation.chat
self.common: Common = Common(self.observation.player_common)
self.psionic_matrix: PsionicMatrix = PsionicMatrix.from_proto(self.observation.raw_data.player.power_sources)
self.game_loop: int = self.observation.game_loop
self.score: ScoreDetails = ScoreDetails(self.observation.score)
self.abilities = self.observation.abilities
destructible = [x for x in self.observation.raw_data.units if x.alliance == 3 and x.radius > 1.5]
self.destructible: Units = Units.from_proto(destructible, game_data)
visible_units, hidden_units, minerals, geysers, destructables, enemy, own = ([] for _ in range(7))
mineral_ids = {
UnitTypeId.RICHMINERALFIELD.value,
UnitTypeId.RICHMINERALFIELD750.value,
UnitTypeId.MINERALFIELD.value,
UnitTypeId.MINERALFIELD750.value,
UnitTypeId.LABMINERALFIELD.value,
UnitTypeId.LABMINERALFIELD750.value,
UnitTypeId.PURIFIERRICHMINERALFIELD.value,
UnitTypeId.PURIFIERRICHMINERALFIELD750.value,
UnitTypeId.PURIFIERMINERALFIELD.value,
UnitTypeId.PURIFIERMINERALFIELD750.value,
UnitTypeId.BATTLESTATIONMINERALFIELD.value,
UnitTypeId.BATTLESTATIONMINERALFIELD750.value,
}
geyser_ids = {
UnitTypeId.VESPENEGEYSER.value,
UnitTypeId.SPACEPLATFORMGEYSER.value,
UnitTypeId.RICHVESPENEGEYSER.value,
UnitTypeId.PROTOSSVESPENEGEYSER.value,
UnitTypeId.PURIFIERVESPENEGEYSER.value,
UnitTypeId.SHAKURASVESPENEGEYSER.value,
}
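# Alliance codes from the raw API (as used below): 1 = own units,
# 3 = neutral (minerals, geysers, destructibles), 4 = enemy units.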
for unit in self.observation.raw_data.units:
if unit.is_blip:
hidden_units.append(unit)
else:
visible_units.append(unit)
# all destructible rocks except the one below the main base ramps
if unit.alliance == 3 and unit.radius > 1.5:
destructables.append(unit)
elif unit.alliance == 3:
# mineral field enums
if unit.unit_type in mineral_ids:
minerals.append(unit)
# geyser enums
elif unit.unit_type in geyser_ids:
geysers.append(unit)
elif unit.alliance == 1:
own.append(unit)
elif unit.alliance == 4:
enemy.append(unit)
self.own_units: Units = Units.from_proto(own, game_data)
self.enemy_units: Units = Units.from_proto(enemy, game_data)
self.mineral_field: Units = Units.from_proto(minerals, game_data)
self.vespene_geyser: Units = Units.from_proto(geysers, game_data)
self.destructables: Units = Units.from_proto(destructables, game_data)
self.units: Units = Units.from_proto(visible_units, game_data)
self.distance_units: Units = Units.from_proto(own + enemy + minerals + geysers, game_data)
self.blips: Set[Blip] = {Blip(unit) for unit in hidden_units}
self.visibility: PixelMap = PixelMap(self.observation.raw_data.map_state.visibility)
self.creep: PixelMap = PixelMap(self.observation.raw_data.map_state.creep)
self.dead_units: Set[int] = {dead_unit_tag for dead_unit_tag in self.observation.raw_data.event.dead_units}
self.effects: Set[EffectData] = {EffectData(effect) for effect in self.observation.raw_data.effects}
self.upgrades: Set[UpgradeId] = {UpgradeId(upgrade) for upgrade in self.observation.raw_data.player.upgrade_ids}
| 38.662857
| 120
| 0.649719
|
8af7f9232212e89562f8a8bef7251b906a81f380
| 7,353
|
py
|
Python
|
nova/tests/functional/regressions/test_bug_1848343.py
|
docc-lab/nova
|
a948a803b561606a892133940915caae610c080b
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/functional/regressions/test_bug_1848343.py
|
docc-lab/nova
|
a948a803b561606a892133940915caae610c080b
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/functional/regressions/test_bug_1848343.py
|
docc-lab/nova
|
a948a803b561606a892133940915caae610c080b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import instance_actions
from nova.compute import manager as compute_manager
from nova.scheduler.client import query as query_client
from nova.tests.functional import integrated_helpers
class DeletedServerAllocationRevertTest(
integrated_helpers.ProviderUsageBaseTestCase):
"""Tests for bug 1848343 introduced in Queens where reverting a
migration-based allocation can re-create and leak allocations for a
deleted server if the server is deleted during a migration (resize,
cold or live).
"""
compute_driver = 'fake.MediumFakeDriver'
def setUp(self):
super(DeletedServerAllocationRevertTest, self).setUp()
# Start two computes so we can migrate between them.
self._start_compute('host1')
self._start_compute('host2')
def _create_server(self):
"""Creates and return a server along with a source host and target
host.
"""
server = self._build_server(networks='none')
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(server, 'ACTIVE')
source_host = server['OS-EXT-SRV-ATTR:host']
target_host = 'host2' if source_host == 'host1' else 'host1'
return server, source_host, target_host
def _assert_no_allocations(self, server):
# There should be no allocations on either host1 or host2.
providers = self._get_all_providers()
for rp in providers:
allocations = self._get_allocations_by_provider_uuid(rp['uuid'])
# FIXME(mriedem): This is bug 1848343 where rollback
# reverts the allocations and moves the source host allocations
# held by the migration consumer back to the now-deleted instance
# consumer.
if rp['name'] == server['OS-EXT-SRV-ATTR:host']:
self.assertFlavorMatchesAllocation(
server['flavor'], server['id'], rp['uuid'])
else:
self.assertEqual({}, allocations,
'Leaked allocations on provider: %s (%s)' %
(rp['uuid'], rp['name']))
def _disable_target_host(self, target_host):
# Disable the target compute service to trigger a NoValidHost from
# the scheduler which happens after conductor has moved the source
# node allocations to the migration record.
target_service = self.computes[target_host].service_ref
self.api.put_service(target_service.uuid, {'status': 'disabled'})
def _stub_delete_server_during_scheduling(self, server):
# Wrap the select_destinations call so we can delete the server
# concurrently while scheduling.
original_select_dests = \
query_client.SchedulerQueryClient.select_destinations
def wrap_select_dests(*args, **kwargs):
# Simulate concurrently deleting the server while scheduling.
self._delete_server(server)
return original_select_dests(*args, **kwargs)
self.stub_out('nova.scheduler.client.query.SchedulerQueryClient.'
'select_destinations', wrap_select_dests)
def test_migration_task_rollback(self):
"""Tests a scenario where the MigrationTask swaps the allocations
for a cold migrate (or resize, it does not matter) and then fails and
rolls back allocations before RPC casting to prep_resize on the dest
host.
"""
server, source_host, target_host = self._create_server()
self._disable_target_host(target_host)
self._stub_delete_server_during_scheduling(server)
# Now start the cold migration which will fail due to NoValidHost.
self.api.post_server_action(server['id'], {'migrate': None},
check_response_status=[202])
# We cannot monitor the migration from the API since it is deleted
# when the instance is deleted so just wait for the failed instance
# action event after the task rollback happens.
# Note that we get InstanceNotFound rather than NoValidHost because
# the NoValidHost handler in ComputeTaskManager._cold_migrate calls
# _set_vm_state_and_notify which raises InstanceNotFound and masks
# the NoValidHost error.
self._assert_resize_migrate_action_fail(
server, instance_actions.MIGRATE, 'InstanceNotFound')
self._assert_no_allocations(server)
def test_live_migration_task_rollback(self):
"""Tests a scenario where the LiveMigrationTask swaps the allocations
for a live migration and then fails and rolls back allocations before
RPC casting to live_migration on the source host.
"""
server, source_host, target_host = self._create_server()
self._disable_target_host(target_host)
self._stub_delete_server_during_scheduling(server)
# Now start the live migration which will fail due to NoValidHost.
body = {'os-migrateLive': {'host': None, 'block_migration': 'auto'}}
self.api.post_server_action(server['id'], body)
# We cannot monitor the migration from the API since it is deleted
# when the instance is deleted so just wait for the failed instance
# action event after the task rollback happens.
self._wait_for_action_fail_completion(
server, instance_actions.LIVE_MIGRATION,
'conductor_live_migrate_instance')
self._assert_no_allocations(server)
def test_migrate_on_compute_fail(self):
"""Tests a scenario where during the _prep_resize on the dest host
the instance is gone which triggers a failure and revert of the
migration-based allocations created in conductor.
"""
server, source_host, target_host = self._create_server()
# Wrap _prep_resize so we can concurrently delete the server.
original_prep_resize = compute_manager.ComputeManager._prep_resize
def wrap_prep_resize(*args, **kwargs):
self._delete_server(server)
return original_prep_resize(*args, **kwargs)
self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
wrap_prep_resize)
# Now start the cold migration which will fail in the dest compute.
self.api.post_server_action(server['id'], {'migrate': None})
# We cannot monitor the migration from the API since it is deleted
# when the instance is deleted so just wait for the failed instance
# action event after the allocation revert happens.
self._wait_for_action_fail_completion(
server, instance_actions.MIGRATE, 'compute_prep_resize')
self._assert_no_allocations(server)
| 48.375
| 77
| 0.687067
|
5636abbf24d3c0aa5a2514dea0a964b9046cb029
| 7,362
|
py
|
Python
|
samtranslator/plugins/api/implicit_rest_api_plugin.py
|
hawflau/serverless-application-model
|
d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2
|
[
"Apache-2.0"
] | null | null | null |
samtranslator/plugins/api/implicit_rest_api_plugin.py
|
hawflau/serverless-application-model
|
d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2
|
[
"Apache-2.0"
] | null | null | null |
samtranslator/plugins/api/implicit_rest_api_plugin.py
|
hawflau/serverless-application-model
|
d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2
|
[
"Apache-2.0"
] | null | null | null |
import six
from samtranslator.model.naming import GeneratedLogicalId
from samtranslator.plugins.api.implicit_api_plugin import ImplicitApiPlugin
from samtranslator.public.swagger import SwaggerEditor
from samtranslator.public.exceptions import InvalidEventException
from samtranslator.public.sdk.resource import SamResourceType, SamResource
class ImplicitRestApiPlugin(ImplicitApiPlugin):
"""
This plugin provides Implicit API shorthand syntax in the SAM Spec.
https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api
    Implicit API syntax is just syntactic sugar, which is translated into an AWS::Serverless::Api resource.
    This is the only event source implemented as a plugin. Other event sources are not plugins because
    the DynamoDB event source, for example, does not create the DynamoDB resource; it only adds
    a connection between the resource and Lambda. With Implicit APIs, however, SAM creates and configures the API
    resource in addition to adding the connection. This plugin handles only the resource creation
    and delegates the connection work to the core translator.
To sum up, here is the split of responsibilities:
* This Plugin: Creates AWS::Serverless::Api and generates a Swagger with Methods, Paths, CORS, API Keys,
Usage Plans etc, essentially anything that configures API Gateway.
* API Event Source (In Core Translator): ONLY adds the Lambda Integration ARN to appropriate method/path
in Swagger. Does **not** configure the API by any means.
"""
def __init__(self):
"""
Initialize the plugin
"""
super(ImplicitRestApiPlugin, self).__init__(ImplicitRestApiPlugin.__name__)
def _setup_api_properties(self):
"""
Sets up properties that are distinct to this plugin
"""
self.implicit_api_logical_id = GeneratedLogicalId.implicit_api()
self.implicit_api_condition = "ServerlessRestApiCondition"
self.api_event_type = "Api"
self.api_type = SamResourceType.Api.value
self.api_id_property = "RestApiId"
self.editor = SwaggerEditor
def _process_api_events(
self, function, api_events, template, condition=None, deletion_policy=None, update_replace_policy=None
):
"""
Actually process given API events. Iteratively adds the APIs to Swagger JSON in the respective Serverless::Api
resource from the template
:param SamResource function: SAM Function containing the API events to be processed
:param dict api_events: API Events extracted from the function. These events will be processed
:param SamTemplate template: SAM Template where Serverless::Api resources can be found
        :param str condition: optional; this is the condition that is on the function with the API event
        :param str deletion_policy: optional; the deletion policy, if any, associated with the function that owns the API event
        :param str update_replace_policy: optional; the update replace policy, if any, associated with the function that owns the API event
        """
for logicalId, event in api_events.items():
event_properties = event.get("Properties", {})
if not event_properties:
continue
if not isinstance(event_properties, dict):
raise InvalidEventException(
logicalId,
"Event 'Properties' must be an Object. If you're using YAML, this may be an indentation issue.",
)
self._add_implicit_api_id_if_necessary(event_properties)
api_id = self._get_api_id(event_properties)
try:
path = event_properties["Path"]
method = event_properties["Method"]
except KeyError as e:
raise InvalidEventException(logicalId, "Event is missing key {}.".format(e))
if not isinstance(path, six.string_types):
raise InvalidEventException(logicalId, "Api Event must have a String specified for 'Path'.")
if not isinstance(method, six.string_types):
raise InvalidEventException(logicalId, "Api Event must have a String specified for 'Method'.")
# !Ref is resolved by this time. If it is not a string, we can't parse/use this Api.
if api_id and not isinstance(api_id, six.string_types):
raise InvalidEventException(
logicalId, "Api Event's RestApiId must be a string referencing an Api in the same template."
)
api_dict_condition = self.api_conditions.setdefault(api_id, {})
method_conditions = api_dict_condition.setdefault(path, {})
method_conditions[method] = condition
api_dict_deletion = self.api_deletion_policies.setdefault(api_id, set())
api_dict_deletion.add(deletion_policy)
api_dict_update_replace = self.api_update_replace_policies.setdefault(api_id, set())
api_dict_update_replace.add(update_replace_policy)
self._add_api_to_swagger(logicalId, event_properties, template)
api_events[logicalId] = event
# We could have made changes to the Events structure. Write it back to function
function.properties["Events"].update(api_events)
def _add_implicit_api_id_if_necessary(self, event_properties):
"""
Events for implicit APIs will *not* have the RestApiId property. Absence of this property means this event
        is associated with the Serverless::Api ImplicitAPI resource. This method solidifies this assumption by adding
        the RestApiId property to events that don't have it.
:param dict event_properties: Dictionary of event properties
"""
if "RestApiId" not in event_properties:
event_properties["RestApiId"] = {"Ref": self.implicit_api_logical_id}
def _generate_implicit_api_resource(self):
"""
Uses the implicit API in this file to generate an Implicit API resource
"""
return ImplicitApiResource().to_dict()
def _get_api_definition_from_editor(self, editor):
"""
Helper function to return the OAS definition from the editor
"""
return editor.swagger
def _get_api_resource_type_name(self):
"""
Returns the type of API resource
"""
return "AWS::Serverless::Api"
class ImplicitApiResource(SamResource):
"""
    Returns an AWS::Serverless::Api resource representing the Implicit APIs. The returned resource includes
the empty swagger along with default values for other properties.
"""
def __init__(self):
swagger = SwaggerEditor.gen_skeleton()
resource = {
"Type": SamResourceType.Api.value,
"Properties": {
                # Because we set the StageName to a constant value here, customers cannot override StageName with
# Globals. This is because, if a property is specified in both Globals and the resource, the resource
# one takes precedence.
"StageName": "Prod",
"DefinitionBody": swagger,
                # Internal property indicating that event source code can add Events. Used only for implicit APIs, to
                # prevent backwards compatibility issues for explicit APIs
"__MANAGE_SWAGGER": True,
},
}
super(ImplicitApiResource, self).__init__(resource)
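# Sketch of the resulting resource dict (derived directly from the constructor above):
#   {
#       "Type": "AWS::Serverless::Api",
#       "Properties": {
#           "StageName": "Prod",
#           "DefinitionBody": <empty Swagger skeleton from SwaggerEditor.gen_skeleton()>,
#           "__MANAGE_SWAGGER": True,
#       },
#   }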
| 44.618182
| 118
| 0.674817
|
dd64b7f2af8e41e19a51c8f19ae00b85d629da22
| 345,605
|
py
|
Python
|
google/cloud/aiplatform/training_jobs.py
|
connor-mccarthy/python-aiplatform
|
184f7f327aa00b4c8d1acc24dcb1c4c4be6c5bcc
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform/training_jobs.py
|
connor-mccarthy/python-aiplatform
|
184f7f327aa00b4c8d1acc24dcb1c4c4be6c5bcc
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform/training_jobs.py
|
connor-mccarthy/python-aiplatform
|
184f7f327aa00b4c8d1acc24dcb1c4c4be6c5bcc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import time
from typing import Dict, List, Optional, Sequence, Tuple, Union
import abc
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import base
from google.cloud.aiplatform.constants import base as constants
from google.cloud.aiplatform import datasets
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import models
from google.cloud.aiplatform import jobs
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.utils import console_utils
from google.cloud.aiplatform.compat.types import (
env_var as gca_env_var,
io as gca_io,
model as gca_model,
pipeline_state as gca_pipeline_state,
training_pipeline as gca_training_pipeline,
)
from google.cloud.aiplatform.utils import _timestamped_gcs_dir
from google.cloud.aiplatform.utils import source_utils
from google.cloud.aiplatform.utils import worker_spec_utils
from google.cloud.aiplatform.utils import column_transformations_utils
from google.cloud.aiplatform.v1.schema.trainingjob import (
definition_v1 as training_job_inputs,
)
from google.rpc import code_pb2
from google.rpc import status_pb2
import proto
_LOGGER = base.Logger(__name__)
_PIPELINE_COMPLETE_STATES = set(
[
gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
gca_pipeline_state.PipelineState.PIPELINE_STATE_CANCELLED,
gca_pipeline_state.PipelineState.PIPELINE_STATE_PAUSED,
]
)
class _TrainingJob(base.VertexAiStatefulResource):
client_class = utils.PipelineClientWithOverride
_resource_noun = "trainingPipelines"
_getter_method = "get_training_pipeline"
_list_method = "list_training_pipelines"
_delete_method = "delete_training_pipeline"
_parse_resource_name_method = "parse_training_pipeline_path"
_format_resource_name_method = "training_pipeline_path"
# Required by the done() method
_valid_done_states = _PIPELINE_COMPLETE_STATES
def __init__(
self,
display_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
):
"""Constructs a Training Job.
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
project (str):
Optional project to retrieve model from. If not set, project set in
aiplatform.init will be used.
location (str):
Optional location to retrieve model from. If not set, location set in
aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional credentials to use to retrieve the model.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
"""
utils.validate_display_name(display_name)
if labels:
utils.validate_labels(labels)
super().__init__(project=project, location=location, credentials=credentials)
self._display_name = display_name
self._labels = labels
self._training_encryption_spec = initializer.global_config.get_encryption_spec(
encryption_spec_key_name=training_encryption_spec_key_name
)
self._model_encryption_spec = initializer.global_config.get_encryption_spec(
encryption_spec_key_name=model_encryption_spec_key_name
)
self._gca_resource = None
@property
@classmethod
@abc.abstractmethod
def _supported_training_schemas(cls) -> Tuple[str]:
"""List of supported schemas for this training job."""
pass
@property
def start_time(self) -> Optional[datetime.datetime]:
"""Time when the TrainingJob entered the `PIPELINE_STATE_RUNNING` for
the first time."""
self._sync_gca_resource()
return getattr(self._gca_resource, "start_time")
@property
def end_time(self) -> Optional[datetime.datetime]:
"""Time when the TrainingJob resource entered the `PIPELINE_STATE_SUCCEEDED`,
`PIPELINE_STATE_FAILED`, `PIPELINE_STATE_CANCELLED` state."""
self._sync_gca_resource()
return getattr(self._gca_resource, "end_time")
@property
def error(self) -> Optional[status_pb2.Status]:
"""Detailed error info for this TrainingJob resource. Only populated when
the TrainingJob's state is `PIPELINE_STATE_FAILED` or
`PIPELINE_STATE_CANCELLED`."""
self._sync_gca_resource()
return getattr(self._gca_resource, "error")
@classmethod
def get(
cls,
resource_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> "_TrainingJob":
"""Get Training Job for the given resource_name.
Args:
resource_name (str):
Required. A fully-qualified resource name or ID.
project (str):
Optional project to retrieve training job from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional location to retrieve training job from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
Raises:
ValueError: If the retrieved training job's training task definition
doesn't match the custom training task definition.
Returns:
A Vertex AI Training Job
"""
# Create job with dummy parameters
        # These parameters won't be used, as the user cannot run the job again.
# If they try, an exception will be raised.
self = cls._empty_constructor(
project=project,
location=location,
credentials=credentials,
resource_name=resource_name,
)
self._gca_resource = self._get_gca_resource(resource_name=resource_name)
if (
self._gca_resource.training_task_definition
not in cls._supported_training_schemas
):
raise ValueError(
f"The retrieved job's training task definition "
f"is {self._gca_resource.training_task_definition}, "
f"which is not compatible with {cls.__name__}."
)
return self
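    # Example usage (the resource name below is hypothetical); the chosen subclass
    # must support the pipeline's training_task_definition, otherwise a ValueError
    # is raised as shown above:
    #   job = aiplatform.CustomTrainingJob.get(
    #       "projects/123/locations/us-central1/trainingPipelines/456"
    #   )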
@classmethod
def _get_and_return_subclass(
cls,
resource_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> "_TrainingJob":
"""Retrieve Training Job subclass for the given resource_name without
knowing the training_task_definition.
Example usage:
```
aiplatform.training_jobs._TrainingJob._get_and_return_subclass(
'projects/.../locations/.../trainingPipelines/12345'
)
# Returns: <google.cloud.aiplatform.training_jobs.AutoMLImageTrainingJob>
```
Args:
resource_name (str):
Required. A fully-qualified resource name or ID.
project (str):
Optional project to retrieve dataset from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional location to retrieve dataset from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
Returns:
A Vertex AI Training Job
"""
# Retrieve training pipeline resource before class construction
client = cls._instantiate_client(location=location, credentials=credentials)
gca_training_pipeline = getattr(client, cls._getter_method)(name=resource_name)
schema_uri = gca_training_pipeline.training_task_definition
# Collect all AutoML training job classes and CustomTrainingJob
class_list = [
c for c in cls.__subclasses__() if c.__name__.startswith("AutoML")
] + [CustomTrainingJob]
# Identify correct training job subclass, construct and return object
for c in class_list:
if schema_uri in c._supported_training_schemas:
return c._empty_constructor(
project=project,
location=location,
credentials=credentials,
resource_name=resource_name,
)
@property
@abc.abstractmethod
def _model_upload_fail_string(self) -> str:
"""Helper property for model upload failure."""
pass
@abc.abstractmethod
def run(self) -> Optional[models.Model]:
"""Runs the training job.
Should call _run_job internally
"""
pass
@staticmethod
def _create_input_data_config(
dataset: Optional[datasets._Dataset] = None,
annotation_schema_uri: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
gcs_destination_uri_prefix: Optional[str] = None,
bigquery_destination: Optional[str] = None,
) -> Optional[gca_training_pipeline.InputDataConfig]:
"""Constructs a input data config to pass to the training pipeline.
Args:
dataset (datasets._Dataset):
The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For tabular Datasets, all their data is exported to
training, to pick and choose from.
annotation_schema_uri (str):
Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object) The schema files
that can be used here are found in
gs://google-cloud-aiplatform/schema/dataset/annotation/,
note that the chosen schema must be consistent with
``metadata``
of the Dataset specified by
``dataset_id``.
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
                columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
This parameter must be used with training_fraction_split, validation_fraction_split and test_fraction_split.
gcs_destination_uri_prefix (str):
Optional. The Google Cloud Storage location.
The Vertex AI environment variables representing Google
Cloud Storage data URIs will always be represented in the
Google Cloud Storage wildcard format to support sharded
data.
- AIP_DATA_FORMAT = "jsonl".
- AIP_TRAINING_DATA_URI = "gcs_destination/training-*"
- AIP_VALIDATION_DATA_URI = "gcs_destination/validation-*"
- AIP_TEST_DATA_URI = "gcs_destination/test-*".
bigquery_destination (str):
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
Raises:
ValueError: When more than 1 type of split configuration is passed or when
the split configuration passed is incompatible with the dataset schema.
"""
input_data_config = None
if dataset:
# Initialize all possible splits
filter_split = None
predefined_split = None
timestamp_split = None
fraction_split = None
# Create filter split
if any(
[
training_filter_split is not None,
validation_filter_split is not None,
test_filter_split is not None,
]
):
if all(
[
training_filter_split is not None,
validation_filter_split is not None,
test_filter_split is not None,
]
):
filter_split = gca_training_pipeline.FilterSplit(
training_filter=training_filter_split,
validation_filter=validation_filter_split,
test_filter=test_filter_split,
)
else:
raise ValueError(
"All filter splits must be passed together or not at all"
)
# Create predefined split
if predefined_split_column_name:
predefined_split = gca_training_pipeline.PredefinedSplit(
key=predefined_split_column_name
)
# Create timestamp split or fraction split
if timestamp_split_column_name:
timestamp_split = gca_training_pipeline.TimestampSplit(
training_fraction=training_fraction_split,
validation_fraction=validation_fraction_split,
test_fraction=test_fraction_split,
key=timestamp_split_column_name,
)
elif any(
[
training_fraction_split is not None,
validation_fraction_split is not None,
test_fraction_split is not None,
]
):
fraction_split = gca_training_pipeline.FractionSplit(
training_fraction=training_fraction_split,
validation_fraction=validation_fraction_split,
test_fraction=test_fraction_split,
)
splits = [
split
for split in [
filter_split,
predefined_split,
                    timestamp_split,
fraction_split,
]
if split is not None
]
            # If no split configuration is specified, the service applies its default split
if len(splits) == 0:
_LOGGER.info(
"No dataset split provided. The service will use a default split."
)
elif len(splits) > 1:
raise ValueError(
"""Can only specify one of:
1. training_filter_split, validation_filter_split, test_filter_split
2. predefined_split_column_name
3. timestamp_split_column_name, training_fraction_split, validation_fraction_split, test_fraction_split
4. training_fraction_split, validation_fraction_split, test_fraction_split"""
)
# create GCS destination
gcs_destination = None
if gcs_destination_uri_prefix:
gcs_destination = gca_io.GcsDestination(
output_uri_prefix=gcs_destination_uri_prefix
)
# TODO(b/177416223) validate managed BQ dataset is passed in
bigquery_destination_proto = None
if bigquery_destination:
bigquery_destination_proto = gca_io.BigQueryDestination(
output_uri=bigquery_destination
)
# create input data config
input_data_config = gca_training_pipeline.InputDataConfig(
fraction_split=fraction_split,
filter_split=filter_split,
predefined_split=predefined_split,
timestamp_split=timestamp_split,
dataset_id=dataset.name,
annotation_schema_uri=annotation_schema_uri,
gcs_destination=gcs_destination,
bigquery_destination=bigquery_destination_proto,
)
return input_data_config
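    # Sketch of the mutually exclusive split options accepted above (only one group
    # may be supplied, per the ValueError raised earlier; values are illustrative):
    #   1. fraction split:   training_fraction_split=0.8, validation_fraction_split=0.1, test_fraction_split=0.1
    #   2. filter split:     training_filter_split=..., validation_filter_split=..., test_filter_split=...
    #   3. predefined split: predefined_split_column_name="split"
    #   4. timestamp split:  timestamp_split_column_name="ts" plus the three fraction arguments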
def _run_job(
self,
training_task_definition: str,
training_task_inputs: Union[dict, proto.Message],
dataset: Optional[datasets._Dataset],
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
annotation_schema_uri: Optional[str] = None,
model: Optional[gca_model.Model] = None,
gcs_destination_uri_prefix: Optional[str] = None,
bigquery_destination: Optional[str] = None,
create_request_timeout: Optional[float] = None,
) -> Optional[models.Model]:
"""Runs the training job.
Args:
training_task_definition (str):
Required. A Google Cloud Storage path to the
YAML file that defines the training task which
is responsible for producing the model artifact,
and may also include additional auxiliary work.
The definition files that can be used here are
found in gs://google-cloud-
aiplatform/schema/trainingjob/definition/. Note:
The URI given on output will be immutable and
probably different, including the URI scheme,
than the one given on input. The output URI will
point to a location where the user only has a
read access.
training_task_inputs (Union[dict, proto.Message]):
Required. The training task's input that corresponds to the training_task_definition parameter.
dataset (datasets._Dataset):
The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For tabular Datasets, all their data is exported to
training, to pick and choose from.
annotation_schema_uri (str):
Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object) The schema files
that can be used here are found in
gs://google-cloud-aiplatform/schema/dataset/annotation/,
note that the chosen schema must be consistent with
``metadata``
of the Dataset specified by
``dataset_id``.
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
                columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
This parameter must be used with training_fraction_split, validation_fraction_split and test_fraction_split.
model (~.model.Model):
Optional. Describes the Model that may be uploaded (via
                [ModelService.UploadModel][]) by this TrainingPipeline. The
TrainingPipeline's
``training_task_definition``
should make clear whether this Model description should be
populated, and if there are any special requirements
regarding how it should be filled. If nothing is mentioned
in the
``training_task_definition``,
then it should be assumed that this field should not be
filled and the training task either uploads the Model
without a need of this information, or that training task
does not support uploading a Model as part of the pipeline.
When the Pipeline's state becomes
``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been
uploaded into Vertex AI, then the model_to_upload's
resource ``name``
is populated. The Model is always uploaded into the Project
and Location in which this pipeline is.
gcs_destination_uri_prefix (str):
Optional. The Google Cloud Storage location.
The Vertex AI environment variables representing Google
Cloud Storage data URIs will always be represented in the
Google Cloud Storage wildcard format to support sharded
data.
- AIP_DATA_FORMAT = "jsonl".
- AIP_TRAINING_DATA_URI = "gcs_destination/training-*"
- AIP_VALIDATION_DATA_URI = "gcs_destination/validation-*"
- AIP_TEST_DATA_URI = "gcs_destination/test-*".
bigquery_destination (str):
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
"""
input_data_config = self._create_input_data_config(
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
gcs_destination_uri_prefix=gcs_destination_uri_prefix,
bigquery_destination=bigquery_destination,
)
# create training pipeline
training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=self._display_name,
training_task_definition=training_task_definition,
training_task_inputs=training_task_inputs,
model_to_upload=model,
input_data_config=input_data_config,
labels=self._labels,
encryption_spec=self._training_encryption_spec,
)
training_pipeline = self.api_client.create_training_pipeline(
parent=initializer.global_config.common_location_path(
self.project, self.location
),
training_pipeline=training_pipeline,
timeout=create_request_timeout,
)
self._gca_resource = training_pipeline
_LOGGER.info("View Training:\n%s" % self._dashboard_uri())
model = self._get_model()
if model is None:
_LOGGER.warning(
"Training did not produce a Managed Model returning None. "
+ self._model_upload_fail_string
)
return model
def _is_waiting_to_run(self) -> bool:
"""Returns True if the Job is pending on upstream tasks False
otherwise."""
self._raise_future_exception()
if self._latest_future:
_LOGGER.info(
"Training Job is waiting for upstream SDK tasks to complete before"
" launching."
)
return True
return False
@property
def state(self) -> Optional[gca_pipeline_state.PipelineState]:
"""Current training state."""
if self._assert_has_run():
return
self._sync_gca_resource()
return self._gca_resource.state
def get_model(self, sync=True) -> models.Model:
"""Vertex AI Model produced by this training, if one was produced.
Args:
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
model: Vertex AI Model produced by this training
Raises:
RuntimeError: If training failed or if a model was not produced by this training.
"""
self._assert_has_run()
if not self._gca_resource.model_to_upload:
raise RuntimeError(self._model_upload_fail_string)
return self._force_get_model(sync=sync)
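    # Typical flow (sketch): after a successful `job.run(...)`, `job.get_model()`
    # returns the uploaded aiplatform.Model; if the pipeline produced no model,
    # a RuntimeError carrying `_model_upload_fail_string` is raised instead.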
@base.optional_sync()
def _force_get_model(self, sync: bool = True) -> models.Model:
"""Vertex AI Model produced by this training, if one was produced.
Args:
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
model: Vertex AI Model produced by this training
Raises:
RuntimeError: If training failed or if a model was not produced by this training.
"""
model = self._get_model()
if model is None:
raise RuntimeError(self._model_upload_fail_string)
return model
def _get_model(self) -> Optional[models.Model]:
"""Helper method to get and instantiate the Model to Upload.
Returns:
model: Vertex AI Model if training succeeded and produced a Vertex AI
Model. None otherwise.
Raises:
RuntimeError: If Training failed.
"""
self._block_until_complete()
if self.has_failed:
raise RuntimeError(
f"Training Pipeline {self.resource_name} failed. No model available."
)
if not self._gca_resource.model_to_upload:
return None
if self._gca_resource.model_to_upload.name:
return models.Model(model_name=self._gca_resource.model_to_upload.name)
def _wait_callback(self):
"""Callback performs custom logging during _block_until_complete. Override in subclass."""
pass
def _block_until_complete(self):
"""Helper method to block and check on job until complete."""
# Used these numbers so failures surface fast
wait = 5 # start at five seconds
log_wait = 5
max_wait = 60 * 5 # 5 minute wait
        multiplier = 2  # scale the logging interval (log_wait) by 2 every iteration
previous_time = time.time()
while self.state not in _PIPELINE_COMPLETE_STATES:
current_time = time.time()
if current_time - previous_time >= log_wait:
_LOGGER.info(
"%s %s current state:\n%s"
% (
self.__class__.__name__,
self._gca_resource.name,
self._gca_resource.state,
)
)
log_wait = min(log_wait * multiplier, max_wait)
previous_time = current_time
self._wait_callback()
time.sleep(wait)
self._raise_failure()
_LOGGER.log_action_completed_against_resource("run", "completed", self)
if self._gca_resource.model_to_upload and not self.has_failed:
_LOGGER.info(
"Model available at %s" % self._gca_resource.model_to_upload.name
)
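    # Logging cadence example (derived from _block_until_complete above): with
    # wait=5s, log_wait starting at 5s and doubling up to max_wait=300s, the state
    # is logged roughly 5s, 10s, 20s, 40s, ... after the previous log line, while
    # the pipeline state itself is re-checked every 5 seconds.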
def _raise_failure(self):
"""Helper method to raise failure if TrainingPipeline fails.
Raises:
RuntimeError: If training failed.
"""
if self._gca_resource.error.code != code_pb2.OK:
raise RuntimeError("Training failed with:\n%s" % self._gca_resource.error)
@property
def has_failed(self) -> bool:
"""Returns True if training has failed.
False otherwise.
"""
self._assert_has_run()
return self.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED
def _dashboard_uri(self) -> str:
"""Helper method to compose the dashboard uri where training can be
viewed."""
fields = self._parse_resource_name(self.resource_name)
url = f"https://console.cloud.google.com/ai/platform/locations/{fields['location']}/training/{fields['training_pipeline']}?project={fields['project']}"
return url
@property
def _has_run(self) -> bool:
"""Helper property to check if this training job has been run."""
return self._gca_resource is not None
def _assert_has_run(self) -> bool:
"""Helper method to assert that this training has run."""
if not self._has_run:
if self._is_waiting_to_run():
return True
raise RuntimeError(
"TrainingPipeline has not been launched. You must run this"
" TrainingPipeline using TrainingPipeline.run. "
)
return False
@classmethod
def list(
cls,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List["base.VertexAiResourceNoun"]:
"""List all instances of this TrainingJob resource.
Example Usage:
aiplatform.CustomTrainingJob.list(
filter='display_name="experiment_a27"',
order_by='create_time desc'
)
Args:
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[VertexAiResourceNoun] - A list of TrainingJob resource objects
"""
training_job_subclass_filter = (
lambda gapic_obj: gapic_obj.training_task_definition
in cls._supported_training_schemas
)
return cls._list_with_local_order(
cls_filter=training_job_subclass_filter,
filter=filter,
order_by=order_by,
project=project,
location=location,
credentials=credentials,
)
def cancel(self) -> None:
"""Starts asynchronous cancellation on the TrainingJob. The server
makes a best effort to cancel the job, but success is not guaranteed.
On successful cancellation, the TrainingJob is not deleted; instead it
becomes a job with state set to `CANCELLED`.
Raises:
RuntimeError: If this TrainingJob has not started running.
"""
if not self._has_run:
raise RuntimeError(
"This TrainingJob has not been launched, use the `run()` method "
"to start. `cancel()` can only be called on a job that is running."
)
self.api_client.cancel_training_pipeline(name=self.resource_name)
def wait_for_resource_creation(self) -> None:
"""Waits until resource has been created."""
self._wait_for_resource_creation()
class _CustomTrainingJob(_TrainingJob):
"""ABC for Custom Training Pipelines.."""
_supported_training_schemas = (schema.training_job.definition.custom_task,)
def __init__(
self,
display_name: str,
container_uri: str,
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
):
"""
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
container_uri (str):
Required: Uri of the training container image in the GCR.
model_serving_container_image_uri (str):
If the training produces a managed Vertex AI Model, the URI of the
Model serving container suitable for serving the model produced by the
training script.
model_serving_container_predict_route (str):
                If the training produces a managed Vertex AI Model, an HTTP path to
send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
model_serving_container_health_route (str):
If the training produces a managed Vertex AI Model, an HTTP path to
send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI
Platform.
model_serving_container_command (Sequence[str]):
The command with which the container is run. Not executed within a
shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
model_serving_container_args (Sequence[str]):
The arguments to the command. The Docker image's CMD is used if this is
not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
model_serving_container_environment_variables (Dict[str, str]):
The environment variables that are to be present in the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
model_serving_container_ports (Sequence[int]):
Declaration of ports that are exposed by the container. This field is
primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
model_description (str):
The description of the Model.
model_instance_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
model_parameters_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
model_prediction_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
project (str):
Project to run training in. Overrides project set in aiplatform.init.
location (str):
Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
staging_bucket (str):
Bucket used to stage source and training artifacts. Overrides
staging_bucket set in aiplatform.init.
"""
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
self._container_uri = container_uri
model_predict_schemata = None
if any(
[
model_instance_schema_uri,
model_parameters_schema_uri,
model_prediction_schema_uri,
]
):
model_predict_schemata = gca_model.PredictSchemata(
instance_schema_uri=model_instance_schema_uri,
parameters_schema_uri=model_parameters_schema_uri,
prediction_schema_uri=model_prediction_schema_uri,
)
# Create the container spec
env = None
ports = None
if model_serving_container_environment_variables:
env = [
gca_env_var.EnvVar(name=str(key), value=str(value))
for key, value in model_serving_container_environment_variables.items()
]
if model_serving_container_ports:
ports = [
gca_model.Port(container_port=port)
for port in model_serving_container_ports
]
container_spec = gca_model.ModelContainerSpec(
image_uri=model_serving_container_image_uri,
command=model_serving_container_command,
args=model_serving_container_args,
env=env,
ports=ports,
predict_route=model_serving_container_predict_route,
health_route=model_serving_container_health_route,
)
# create model payload
self._managed_model = gca_model.Model(
description=model_description,
predict_schemata=model_predict_schemata,
container_spec=container_spec,
encryption_spec=self._model_encryption_spec,
)
self._staging_bucket = (
staging_bucket or initializer.global_config.staging_bucket
)
if not self._staging_bucket:
raise RuntimeError(
"staging_bucket should be set in TrainingJob constructor or "
"set using aiplatform.init(staging_bucket='gs://my-bucket')"
)
# Backing Custom Job resource is not known until after data preprocessing
# once Custom Job is known we log the console uri and the tensorboard uri
# this flags keeps that state so we don't log it multiple times
self._has_logged_custom_job = False
self._logged_web_access_uris = set()
@property
def network(self) -> Optional[str]:
"""The full name of the Google Compute Engine
[network](https://cloud.google.com/vpc/docs/vpc#networks) to which this
CustomTrainingJob should be peered.
Takes the format `projects/{project}/global/networks/{network}`. Where
{project} is a project number, as in `12345`, and {network} is a network name.
Private services access must already be configured for the network. If left
unspecified, the CustomTrainingJob is not peered with any network.
"""
# Return `network` value in training task inputs if set in Map
self._assert_gca_resource_is_available()
return self._gca_resource.training_task_inputs.get("network")
def _prepare_and_validate_run(
self,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
reduction_server_replica_count: int = 0,
reduction_server_machine_type: Optional[str] = None,
) -> Tuple[worker_spec_utils._DistributedTrainingSpec, Optional[gca_model.Model]]:
"""Create worker pool specs and managed model as well validating the
run.
Args:
model_display_name (str):
If the script produces a managed Vertex AI Model. The display name of
                the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
replica_count (int):
                The number of worker replicas. If replica_count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
machine_type (str):
The type of machine to use for training.
accelerator_type (str):
Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
accelerator_count (int):
The number of accelerators to attach to a worker replica.
boot_disk_type (str):
Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
boot_disk_size_gb (int):
Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
reduction_server_replica_count (int):
The number of reduction server replicas, default is 0.
reduction_server_machine_type (str):
Optional. The type of machine to use for reduction server.
Returns:
Worker pools specs and managed model for run.
Raises:
RuntimeError: If Training job has already been run or model_display_name was
provided but required arguments were not provided in constructor.
"""
if self._is_waiting_to_run():
raise RuntimeError("Custom Training is already scheduled to run.")
if self._has_run:
raise RuntimeError("Custom Training has already run.")
# if args needed for model is incomplete
if model_display_name and not self._managed_model.container_spec.image_uri:
raise RuntimeError(
"""model_display_name was provided but
model_serving_container_image_uri was not provided when this
custom pipeline was constructed.
"""
)
if self._managed_model.container_spec.image_uri:
model_display_name = model_display_name or self._display_name + "-model"
# validates args and will raise
worker_pool_specs = (
worker_spec_utils._DistributedTrainingSpec.chief_worker_pool(
replica_count=replica_count,
machine_type=machine_type,
accelerator_count=accelerator_count,
accelerator_type=accelerator_type,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
reduction_server_replica_count=reduction_server_replica_count,
reduction_server_machine_type=reduction_server_machine_type,
).pool_specs
)
managed_model = self._managed_model
if model_display_name:
utils.validate_display_name(model_display_name)
managed_model.display_name = model_display_name
if model_labels:
utils.validate_labels(model_labels)
managed_model.labels = model_labels
else:
managed_model.labels = self._labels
else:
managed_model = None
return worker_pool_specs, managed_model
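    # Sketch (illustrative values) of what this validation step consumes for a
    # single-replica GPU job:
    #   replica_count=1, machine_type="n1-standard-4",
    #   accelerator_type="NVIDIA_TESLA_T4", accelerator_count=1,
    #   boot_disk_type="pd-ssd", boot_disk_size_gb=100
    # which yields the chief/worker pool specs and, when a serving container image
    # was configured on the job, a managed model payload whose display name defaults
    # to "<display_name>-model".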
def _prepare_training_task_inputs_and_output_dir(
self,
worker_pool_specs: worker_spec_utils._DistributedTrainingSpec,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
timeout: Optional[int] = None,
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
) -> Tuple[Dict, str]:
"""Prepares training task inputs and output directory for custom job.
Args:
            worker_pool_specs (worker_spec_utils._DistributedTrainingSpec):
                Worker pool specs required to run the job.
base_output_dir (str):
GCS output directory of job. If not provided a
timestamped directory in the staging directory will be used.
service_account (str):
Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
timeout (int):
The maximum job running time in seconds. The default is 7 days.
restart_job_on_worker_restart (bool):
Restarts the entire CustomJob if a worker
gets restarted. This feature can be used by
distributed training jobs that are not resilient
to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
tensorboard (str):
Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
resource to which this CustomJob will upload Tensorboard
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
The training script should write Tensorboard logs to the following Vertex AI
environment variable:
AIP_TENSORBOARD_LOG_DIR
`service_account` is required when `tensorboard` is provided.
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
Returns:
Training task inputs and Output directory for custom job.
"""
# default directory if not given
base_output_dir = base_output_dir or _timestamped_gcs_dir(
self._staging_bucket, "aiplatform-custom-training"
)
_LOGGER.info("Training Output directory:\n%s " % base_output_dir)
training_task_inputs = {
"worker_pool_specs": worker_pool_specs,
"base_output_directory": {"output_uri_prefix": base_output_dir},
}
if service_account:
training_task_inputs["service_account"] = service_account
if network:
training_task_inputs["network"] = network
if tensorboard:
training_task_inputs["tensorboard"] = tensorboard
if enable_web_access:
training_task_inputs["enable_web_access"] = enable_web_access
if timeout or restart_job_on_worker_restart:
timeout = f"{timeout}s" if timeout else None
scheduling = {
"timeout": timeout,
"restart_job_on_worker_restart": restart_job_on_worker_restart,
}
training_task_inputs["scheduling"] = scheduling
return training_task_inputs, base_output_dir
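# Illustrative sketch of the structure assembled above (values are examples,
# not defaults): with a service account, web access enabled and a one-hour
# timeout, the returned training task inputs look roughly like:
#
#   {
#       "worker_pool_specs": [...],
#       "base_output_directory": {"output_uri_prefix": "gs://my-bucket/aiplatform-custom-training-<timestamp>"},
#       "service_account": "trainer@my-project.iam.gserviceaccount.com",
#       "enable_web_access": True,
#       "scheduling": {"timeout": "3600s", "restart_job_on_worker_restart": False},
#   }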
@property
def web_access_uris(self) -> Dict[str, str]:
"""Get the web access uris of the backing custom job.
Returns:
(Dict[str, str]):
Web access uris of the backing custom job.
"""
web_access_uris = dict()
if (
self._gca_resource.training_task_metadata
and self._gca_resource.training_task_metadata.get("backingCustomJob")
):
custom_job_resource_name = self._gca_resource.training_task_metadata.get(
"backingCustomJob"
)
custom_job = jobs.CustomJob.get(resource_name=custom_job_resource_name)
web_access_uris = dict(custom_job.web_access_uris)
return web_access_uris
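# Usage sketch (assumes a job launched with enable_web_access=True and
# sync=False; the variable name `job` is illustrative): while the job is
# running, the interactive shell links can be read from this property:
#
#   job.run(..., enable_web_access=True, sync=False)
#   for worker, uri in job.web_access_uris.items():
#       print(worker, uri)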
def _log_web_access_uris(self):
"""Helper method to log the web access uris of the backing custom job"""
for worker, uri in self.web_access_uris.items():
if uri not in self._logged_web_access_uris:
_LOGGER.info(
"%s %s access the interactive shell terminals for the backing custom job:\n%s:\n%s"
% (
self.__class__.__name__,
self._gca_resource.name,
worker,
uri,
),
)
self._logged_web_access_uris.add(uri)
def _wait_callback(self):
if (
self._gca_resource.training_task_metadata
and self._gca_resource.training_task_metadata.get("backingCustomJob")
and not self._has_logged_custom_job
):
_LOGGER.info(f"View backing custom job:\n{self._custom_job_console_uri()}")
if self._gca_resource.training_task_inputs.get("tensorboard"):
_LOGGER.info(f"View tensorboard:\n{self._tensorboard_console_uri()}")
self._has_logged_custom_job = True
if self._gca_resource.training_task_inputs.get("enable_web_access"):
self._log_web_access_uris()
def _custom_job_console_uri(self) -> str:
"""Helper method to compose the dashboard uri where custom job can be viewed."""
custom_job_resource_name = self._gca_resource.training_task_metadata.get(
"backingCustomJob"
)
return console_utils.custom_job_console_uri(custom_job_resource_name)
def _tensorboard_console_uri(self) -> str:
"""Helper method to compose dashboard uri where tensorboard can be viewed."""
tensorboard_resource_name = self._gca_resource.training_task_inputs.get(
"tensorboard"
)
custom_job_resource_name = self._gca_resource.training_task_metadata.get(
"backingCustomJob"
)
return console_utils.custom_job_tensorboard_console_uri(
tensorboard_resource_name, custom_job_resource_name
)
@property
def _model_upload_fail_string(self) -> str:
"""Helper property for model upload failure."""
return (
f"Training Pipeline {self.resource_name} is not configured to upload a "
"Model. Create the Training Pipeline with "
"model_serving_container_image_uri and model_display_name passed in. "
"Ensure that your training script saves your model to "
"os.environ['AIP_MODEL_DIR']."
)
# TODO(b/172368325) add scheduling, custom_job.Scheduling
class CustomTrainingJob(_CustomTrainingJob):
"""Class to launch a Custom Training Job in Vertex AI using a script.
Takes a training implementation as a python script and executes that
script in Cloud Vertex AI Training.
"""
def __init__(
self,
display_name: str,
script_path: str,
container_uri: str,
requirements: Optional[Sequence[str]] = None,
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
):
"""Constructs a Custom Training Job from a Python script.
job = aiplatform.CustomTrainingJob(
display_name='test-train',
script_path='test_script.py',
requirements=['pandas', 'numpy'],
container_uri='gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest',
model_serving_container_image_uri='gcr.io/my-trainer/serving:1',
model_serving_container_predict_route='predict',
model_serving_container_health_route='metadata',
labels={'key': 'value'},
)
Usage with Dataset:
ds = aiplatform.TabularDataset(
'projects/my-project/locations/us-central1/datasets/12345')
job.run(
ds,
replica_count=1,
model_display_name='my-trained-model',
model_labels={'key': 'value'},
)
Usage without Dataset:
job.run(replica_count=1, model_display_name='my-trained-model')
TODO(b/169782082) add documentation about training utilities
To ensure your model gets saved in Vertex AI, write your saved model to
os.environ["AIP_MODEL_DIR"] in your provided training script.
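A minimal sketch of that save step inside the training script (illustrative
only; it assumes a Keras model object named `model`, adapt to your framework):

    import os

    model.save(os.environ["AIP_MODEL_DIR"])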
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
script_path (str): Required. Local path to training script.
container_uri (str):
Required. URI of the training container image in GCR.
requirements (Sequence[str]):
List of python package dependencies of the script.
model_serving_container_image_uri (str):
If the training produces a managed Vertex AI Model, the URI of the
Model serving container suitable for serving the model produced by the
training script.
model_serving_container_predict_route (str):
If the training produces a managed Vertex AI Model, an HTTP path to
send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
model_serving_container_health_route (str):
If the training produces a managed Vertex AI Model, an HTTP path to
send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI
Platform.
model_serving_container_command (Sequence[str]):
The command with which the container is run. Not executed within a
shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
model_serving_container_args (Sequence[str]):
The arguments to the command. The Docker image's CMD is used if this is
not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
model_serving_container_environment_variables (Dict[str, str]):
The environment variables that are to be present in the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
model_serving_container_ports (Sequence[int]):
Declaration of ports that are exposed by the container. This field is
primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
model_description (str):
The description of the Model.
model_instance_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
model_parameters_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
model_prediction_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
project (str):
Project to run training in. Overrides project set in aiplatform.init.
location (str):
Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
staging_bucket (str):
Bucket used to stage source and training artifacts. Overrides
staging_bucket set in aiplatform.init.
"""
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
container_uri=container_uri,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_description=model_description,
staging_bucket=staging_bucket,
)
self._requirements = requirements
self._script_path = script_path
def run(
self,
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
] = None,
annotation_schema_uri: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
reduction_server_replica_count: int = 0,
reduction_server_machine_type: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
timeout: Optional[int] = None,
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
sync=True,
create_request_timeout: Optional[float] = None,
) -> Optional[models.Model]:
"""Runs the custom training job.
Distributed Training Support:
If replica_count = 1, one chief replica will be provisioned. If
replica_count > 1, the remainder will be provisioned as a worker replica pool,
e.g. replica_count = 10 will result in 1 chief and 9 workers.
All replicas have the same machine_type, accelerator_type, and accelerator_count.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; together they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters, data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Predefined splits:
Assigns input data to training, validation, and test sets based on the value of a provided key.
If using predefined splits, ``predefined_split_column_name`` must be provided.
Supported only for tabular Datasets.
Timestamp splits:
Assigns input data to training, validation, and test sets
based on the provided timestamps. The youngest data pieces are
assigned to training set, next to validation set, and the oldest
to the test set.
Supported only for tabular Datasets.
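For example, a fraction split can be requested as follows (a sketch only; `ds`
is assumed to be an existing Vertex AI dataset as in the usage above):

    job.run(
        ds,
        training_fraction_split=0.8,
        validation_fraction_split=0.1,
        test_fraction_split=0.1,
        replica_count=1,
        model_display_name='my-trained-model',
    )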
Args:
dataset (
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
):
The Vertex AI dataset to fit this training against. The custom training script
should retrieve the data splits through the URIs passed in as environment variables:
os.environ["AIP_TRAINING_DATA_URI"]
os.environ["AIP_VALIDATION_DATA_URI"]
os.environ["AIP_TEST_DATA_URI"]
Additionally the dataset format is passed in as:
os.environ["AIP_DATA_FORMAT"]
annotation_schema_uri (str):
Google Cloud Storage URI that points to a YAML file describing the
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object). The schema files
that can be used here are found in
gs://google-cloud-aiplatform/schema/dataset/annotation/,
note that the chosen schema must be consistent with
``metadata``
of the Dataset specified by
``dataset_id``.
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
model_display_name (str):
If the script produces a managed Vertex AI Model. The display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
base_output_dir (str):
GCS output directory of job. If not provided a
timestamped directory in the staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. <base_output_dir>/logs/
service_account (str):
Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
bigquery_destination (str):
Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
args (List[Union[str, int, float]]):
Command line arguments to be passed to the Python script.
environment_variables (Dict[str, str]):
Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The name of each environment variable must be unique.
environment_variables = {
'MY_KEY': 'MY_VALUE'
}
replica_count (int):
The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
machine_type (str):
The type of machine to use for training.
accelerator_type (str):
Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
accelerator_count (int):
The number of accelerators to attach to a worker replica.
boot_disk_type (str):
Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
boot_disk_size_gb (int):
Size in GB of the boot disk, default is 100GB.
Boot disk size must be within the range of [100, 64000].
reduction_server_replica_count (int):
The number of reduction server replicas, default is 0.
reduction_server_machine_type (str):
Optional. The type of machine to use for reduction server.
reduction_server_container_uri (str):
Optional. The Uri of the reduction server container image.
See details: https://cloud.google.com/vertex-ai/docs/training/distributed-training#reduce_training_time_with_reduction_server
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
timeout (int):
The maximum job running time in seconds. The default is 7 days.
restart_job_on_worker_restart (bool):
Restarts the entire CustomJob if a worker
gets restarted. This feature can be used by
distributed training jobs that are not resilient
to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
tensorboard (str):
Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
resource to which this CustomJob will upload Tensorboard
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
The training script should write Tensorboard logs to the following Vertex AI
environment variable:
AIP_TENSORBOARD_LOG_DIR
`service_account` is required when `tensorboard` is provided.
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
worker_pool_specs, managed_model = self._prepare_and_validate_run(
model_display_name=model_display_name,
model_labels=model_labels,
replica_count=replica_count,
machine_type=machine_type,
accelerator_count=accelerator_count,
accelerator_type=accelerator_type,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
reduction_server_replica_count=reduction_server_replica_count,
reduction_server_machine_type=reduction_server_machine_type,
)
# make and copy package
python_packager = source_utils._TrainingScriptPythonPackager(
script_path=self._script_path, requirements=self._requirements
)
return self._run(
python_packager=python_packager,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
worker_pool_specs=worker_pool_specs,
managed_model=managed_model,
args=args,
environment_variables=environment_variables,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
timeout=timeout,
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
reduction_server_container_uri=reduction_server_container_uri
if reduction_server_replica_count > 0
else None,
sync=sync,
create_request_timeout=create_request_timeout,
)
@base.optional_sync(construct_object_on_arg="managed_model")
def _run(
self,
python_packager: source_utils._TrainingScriptPythonPackager,
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
],
annotation_schema_uri: Optional[str],
worker_pool_specs: worker_spec_utils._DistributedTrainingSpec,
managed_model: Optional[gca_model.Model] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
timeout: Optional[int] = None,
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
sync=True,
create_request_timeout: Optional[float] = None,
) -> Optional[models.Model]:
"""Packages local script and launches training_job.
Args:
python_packager (source_utils._TrainingScriptPythonPackager):
Required. Python Packager pointing to training script locally.
dataset (
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
):
The Vertex AI dataset to fit this training against.
annotation_schema_uri (str):
Google Cloud Storage URI that points to a YAML file describing the
annotation schema.
worker_pool_specs (worker_spec_utils._DistributedTrainingSpec):
Worker pool specs required to run the job.
managed_model (gca_model.Model):
Model proto if this script produces a Managed Model.
args (List[Union[str, int, float]]):
Command line arguments to be passed to the Python script.
environment_variables (Dict[str, str]):
Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The name of each environment variable must be unique.
environment_variables = {
'MY_KEY': 'MY_VALUE'
}
base_output_dir (str):
GCS output directory of job. If not provided a
timestamped directory in the staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. <base_output_dir>/logs/
service_account (str):
Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
bigquery_destination (str):
Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
timeout (int):
The maximum job running time in seconds. The default is 7 days.
restart_job_on_worker_restart (bool):
Restarts the entire CustomJob if a worker
gets restarted. This feature can be used by
distributed training jobs that are not resilient
to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
tensorboard (str):
Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
resource to which this CustomJob will upload Tensorboard
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
The training script should write Tensorboard logs to the following Vertex AI
environment variable:
AIP_TENSORBOARD_LOG_DIR
`service_account` is required when `tensorboard` is provided.
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
reduction_server_container_uri (str):
Optional. The Uri of the reduction server container image.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
package_gcs_uri = python_packager.package_and_copy_to_gcs(
gcs_staging_dir=self._staging_bucket,
project=self.project,
credentials=self.credentials,
)
for spec_order, spec in enumerate(worker_pool_specs):
if not spec:
continue
if (
spec_order == worker_spec_utils._SPEC_ORDERS["server_spec"]
and reduction_server_container_uri
):
spec["container_spec"] = {
"image_uri": reduction_server_container_uri,
}
else:
spec["python_package_spec"] = {
"executor_image_uri": self._container_uri,
"python_module": python_packager.module_name,
"package_uris": [package_gcs_uri],
}
if args:
spec["python_package_spec"]["args"] = args
if environment_variables:
spec["python_package_spec"]["env"] = [
{"name": key, "value": value}
for key, value in environment_variables.items()
]
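# Illustrative sketch (module and package names are examples only): after this
# loop, a non-reduction-server pool entry carries a python_package_spec
# roughly like:
#
#   spec["python_package_spec"] == {
#       "executor_image_uri": self._container_uri,
#       "python_module": python_packager.module_name,
#       "package_uris": [package_gcs_uri],
#       "args": args,  # only present when args were passed
#       "env": [{"name": "MY_KEY", "value": "MY_VALUE"}],  # only when env vars were passed
#   }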
(
training_task_inputs,
base_output_dir,
) = self._prepare_training_task_inputs_and_output_dir(
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
timeout=timeout,
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
)
model = self._run_job(
training_task_definition=schema.training_job.definition.custom_task,
training_task_inputs=training_task_inputs,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
model=managed_model,
gcs_destination_uri_prefix=base_output_dir,
bigquery_destination=bigquery_destination,
create_request_timeout=create_request_timeout,
)
return model
class CustomContainerTrainingJob(_CustomTrainingJob):
"""Class to launch a Custom Training Job in Vertex AI using a
Container."""
def __init__(
self,
display_name: str,
container_uri: str,
command: Optional[Sequence[str]] = None,
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
):
"""Constructs a Custom Container Training Job.
job = aiplatform.CustomContainerTrainingJob(
display_name='test-train',
container_uri='gcr.io/my_project_id/my_image_name:tag',
command=['python3', 'run_script.py'],
model_serving_container_image_uri='gcr.io/my-trainer/serving:1',
model_serving_container_predict_route='predict',
model_serving_container_health_route='metadata',
labels={'key': 'value'},
)
Usage with Dataset:
ds = aiplatform.TabularDataset(
'projects/my-project/locations/us-central1/datasets/12345')
job.run(
ds,
replica_count=1,
model_display_name='my-trained-model',
model_labels={'key': 'value'},
)
Usage without Dataset:
job.run(replica_count=1, model_display_name='my-trained-model')
TODO(b/169782082) add documentation about training utilities
To ensure your model gets saved in Vertex AI, write your saved model to
os.environ["AIP_MODEL_DIR"] in your provided training script.
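A minimal sketch of how the container's training code might read the locations
Vertex AI injects (illustrative only; the data variables are set only when a
dataset is passed to `run`):

    import os

    model_dir = os.environ["AIP_MODEL_DIR"]
    data_format = os.environ.get("AIP_DATA_FORMAT")
    training_data_uri = os.environ.get("AIP_TRAINING_DATA_URI")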
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
container_uri (str):
Required. URI of the training container image in GCR.
command (Sequence[str]):
The command to be invoked when the container is started.
It overrides the entrypoint instruction in the Dockerfile when provided.
model_serving_container_image_uri (str):
If the training produces a managed Vertex AI Model, the URI of the
Model serving container suitable for serving the model produced by the
training script.
model_serving_container_predict_route (str):
If the training produces a managed Vertex AI Model, an HTTP path to
send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
model_serving_container_health_route (str):
If the training produces a managed Vertex AI Model, an HTTP path to
send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI
Platform.
model_serving_container_command (Sequence[str]):
The command with which the container is run. Not executed within a
shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
model_serving_container_args (Sequence[str]):
The arguments to the command. The Docker image's CMD is used if this is
not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
model_serving_container_environment_variables (Dict[str, str]):
The environment variables that are to be present in the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
model_serving_container_ports (Sequence[int]):
Declaration of ports that are exposed by the container. This field is
primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
model_description (str):
The description of the Model.
model_instance_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
model_parameters_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
model_prediction_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
project (str):
Project to run training in. Overrides project set in aiplatform.init.
location (str):
Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
staging_bucket (str):
Bucket used to stage source and training artifacts. Overrides
staging_bucket set in aiplatform.init.
"""
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
container_uri=container_uri,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_description=model_description,
staging_bucket=staging_bucket,
)
self._command = command
def run(
self,
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
] = None,
annotation_schema_uri: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
reduction_server_replica_count: int = 0,
reduction_server_machine_type: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
timeout: Optional[int] = None,
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
sync=True,
create_request_timeout: Optional[float] = None,
) -> Optional[models.Model]:
"""Runs the custom training job.
Distributed Training Support:
If replica_count = 1, one chief replica will be provisioned. If
replica_count > 1, the remainder will be provisioned as a worker replica pool,
e.g. replica_count = 10 will result in 1 chief and 9 workers.
All replicas have the same machine_type, accelerator_type, and accelerator_count.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; together they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters, data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Predefined splits:
Assigns input data to training, validation, and test sets based on the value of a provided key.
If using predefined splits, ``predefined_split_column_name`` must be provided.
Supported only for tabular Datasets.
Timestamp splits:
Assigns input data to training, validation, and test sets
based on the provided timestamps. The youngest data pieces are
assigned to training set, next to validation set, and the oldest
to the test set.
Supported only for tabular Datasets.
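For example, a predefined split can be requested as follows (a sketch only;
`ds` is an existing tabular dataset and 'data_split' is a placeholder column
whose values are 'training', 'validation' or 'test'):

    job.run(
        ds,
        predefined_split_column_name='data_split',
        replica_count=1,
        model_display_name='my-trained-model',
    )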
Args:
dataset (Union[datasets.ImageDataset, datasets.TabularDataset, datasets.TextDataset, datasets.VideoDataset]):
The Vertex AI dataset to fit this training against. The custom training code
should retrieve the data splits through the URIs passed in as environment variables:
os.environ["AIP_TRAINING_DATA_URI"]
os.environ["AIP_VALIDATION_DATA_URI"]
os.environ["AIP_TEST_DATA_URI"]
Additionally the dataset format is passed in as:
os.environ["AIP_DATA_FORMAT"]
annotation_schema_uri (str):
Google Cloud Storage URI that points to a YAML file describing the
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object). The schema files
that can be used here are found in
gs://google-cloud-aiplatform/schema/dataset/annotation/,
note that the chosen schema must be consistent with
``metadata``
of the Dataset specified by
``dataset_id``.
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
model_display_name (str):
If the script produces a managed Vertex AI Model. The display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
base_output_dir (str):
GCS output directory of job. If not provided a
timestamped directory in the staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. <base_output_dir>/logs/
service_account (str):
Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
bigquery_destination (str):
Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
args (List[Union[str, int, float]]):
Command line arguments to be passed to the Python script.
environment_variables (Dict[str, str]):
Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
environment_variables = {
'MY_KEY': 'MY_VALUE'
}
replica_count (int):
The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
machine_type (str):
The type of machine to use for training.
accelerator_type (str):
Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
accelerator_count (int):
The number of accelerators to attach to a worker replica.
boot_disk_type (str):
Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
boot_disk_size_gb (int):
Size in GB of the boot disk, default is 100GB.
Boot disk size must be within the range of [100, 64000].
reduction_server_replica_count (int):
The number of reduction server replicas, default is 0.
reduction_server_machine_type (str):
Optional. The type of machine to use for reduction server.
reduction_server_container_uri (str):
Optional. The Uri of the reduction server container image.
See details: https://cloud.google.com/vertex-ai/docs/training/distributed-training#reduce_training_time_with_reduction_server
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
timeout (int):
The maximum job running time in seconds. The default is 7 days.
restart_job_on_worker_restart (bool):
Restarts the entire CustomJob if a worker
gets restarted. This feature can be used by
distributed training jobs that are not resilient
to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
tensorboard (str):
Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
resource to which this CustomJob will upload Tensorboard
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
The training script should write TensorBoard logs to the directory given by the
following Vertex AI environment variable:
AIP_TENSORBOARD_LOG_DIR
`service_account` is required when `tensorboard` is provided.
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
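Example (illustrative; both resource names below are placeholders):
tensorboard="projects/123/locations/us-central1/tensorboards/456",
service_account="my-sa@my-project.iam.gserviceaccount.com"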
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
Raises:
RuntimeError: If Training job has already been run, staging_bucket has not
been set, or model_display_name was provided but required arguments
were not provided in constructor.
"""
worker_pool_specs, managed_model = self._prepare_and_validate_run(
model_display_name=model_display_name,
model_labels=model_labels,
replica_count=replica_count,
machine_type=machine_type,
accelerator_count=accelerator_count,
accelerator_type=accelerator_type,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
reduction_server_replica_count=reduction_server_replica_count,
reduction_server_machine_type=reduction_server_machine_type,
)
return self._run(
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
worker_pool_specs=worker_pool_specs,
managed_model=managed_model,
args=args,
environment_variables=environment_variables,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
timeout=timeout,
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
reduction_server_container_uri=reduction_server_container_uri
if reduction_server_replica_count > 0
else None,
sync=sync,
create_request_timeout=create_request_timeout,
)
@base.optional_sync(construct_object_on_arg="managed_model")
def _run(
self,
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
],
annotation_schema_uri: Optional[str],
worker_pool_specs: worker_spec_utils._DistributedTrainingSpec,
managed_model: Optional[gca_model.Model] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
timeout: Optional[int] = None,
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
sync=True,
create_request_timeout: Optional[float] = None,
) -> Optional[models.Model]:
"""Packages local script and launches training_job.
Args:
dataset (
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
):
Vertex AI Dataset to fit this training against.
annotation_schema_uri (str):
Google Cloud Storage URI that points to a YAML file describing
the annotation schema.
worker_pool_specs (worker_spec_utils._DistributedTrainingSpec):
Worker pool specs required to run the job.
managed_model (gca_model.Model):
Model proto if this script produces a Managed Model.
args (List[Union[str, int, float]]):
Command line arguments to be passed to the Python script.
environment_variables (Dict[str, str]):
Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The name of each environment variable must be unique.
environment_variables = {
'MY_KEY': 'MY_VALUE'
}
base_output_dir (str):
GCS output directory of job. If not provided, a
timestamped directory in the staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. <base_output_dir>/logs/
service_account (str):
Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
timeout (int):
The maximum job running time in seconds. The default is 7 days.
restart_job_on_worker_restart (bool):
Restarts the entire CustomJob if a worker
gets restarted. This feature can be used by
distributed training jobs that are not resilient
to workers leaving and joining a job.
bigquery_destination (str):
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
tensorboard (str):
Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
resource to which this CustomJob will upload Tensorboard
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
The training script should write TensorBoard logs to the directory given by the
following Vertex AI environment variable:
AIP_TENSORBOARD_LOG_DIR
`service_account` is required when `tensorboard` is provided.
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
reduction_server_container_uri (str):
Optional. The Uri of the reduction server container image.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
for spec_order, spec in enumerate(worker_pool_specs):
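# Skip empty worker pool specs. The pool at the reduction-server position
# (when a reduction server image is given) runs that image; every other pool
# runs this job's training container with its command, args and environment
# variables.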
if not spec:
continue
if (
spec_order == worker_spec_utils._SPEC_ORDERS["server_spec"]
and reduction_server_container_uri
):
spec["container_spec"] = {
"image_uri": reduction_server_container_uri,
}
else:
spec["containerSpec"] = {"imageUri": self._container_uri}
if self._command:
spec["containerSpec"]["command"] = self._command
if args:
spec["containerSpec"]["args"] = args
if environment_variables:
spec["containerSpec"]["env"] = [
{"name": key, "value": value}
for key, value in environment_variables.items()
]
(
training_task_inputs,
base_output_dir,
) = self._prepare_training_task_inputs_and_output_dir(
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
timeout=timeout,
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
)
model = self._run_job(
training_task_definition=schema.training_job.definition.custom_task,
training_task_inputs=training_task_inputs,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
model=managed_model,
gcs_destination_uri_prefix=base_output_dir,
bigquery_destination=bigquery_destination,
create_request_timeout=create_request_timeout,
)
return model
class AutoMLTabularTrainingJob(_TrainingJob):
_supported_training_schemas = (schema.training_job.definition.automl_tabular,)
def __init__(
self,
display_name: str,
optimization_prediction_type: str,
optimization_objective: Optional[str] = None,
column_specs: Optional[Dict[str, str]] = None,
column_transformations: Optional[List[Dict[str, Dict[str, str]]]] = None,
optimization_objective_recall_value: Optional[float] = None,
optimization_objective_precision_value: Optional[float] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
):
"""Constructs a AutoML Tabular Training Job.
Example usage:
job = training_jobs.AutoMLTabularTrainingJob(
display_name="my_display_name",
optimization_prediction_type="classification",
optimization_objective="minimize-log-loss",
column_specs={"column_1": "auto", "column_2": "numeric"},
labels={'key': 'value'},
)
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
optimization_prediction_type (str):
The type of prediction the Model is to produce.
"classification" - Predict one out of multiple target values is
picked for each row.
"regression" - Predict a value based on its relation to other values.
This type is available only to columns that contain
semantically numeric values, i.e. integers or floating
point number, even if stored as e.g. strings.
optimization_objective (str):
Optional. Objective function the Model is to be optimized towards. The training
task creates a Model that maximizes/minimizes the value of the objective
function over the validation set.
The supported optimization objectives depend on the prediction type, and
in the case of classification also the number of distinct values in the
target column (two distinct values -> binary, 3 or more distinct values
-> multi class).
If the field is not set, the default objective function is used.
Classification (binary):
"maximize-au-roc" (default) - Maximize the area under the receiver
operating characteristic (ROC) curve.
"minimize-log-loss" - Minimize log loss.
"maximize-au-prc" - Maximize the area under the precision-recall curve.
"maximize-precision-at-recall" - Maximize precision for a specified
recall value.
"maximize-recall-at-precision" - Maximize recall for a specified
precision value.
Classification (multi class):
"minimize-log-loss" (default) - Minimize log loss.
Regression:
"minimize-rmse" (default) - Minimize root-mean-squared error (RMSE).
"minimize-mae" - Minimize mean-absolute error (MAE).
"minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
column_specs (Dict[str, str]):
Optional. Alternative to column_transformations where the keys of the dict
are column names and their respective values are one of
AutoMLTabularTrainingJob.column_data_types.
When creating transformation for BigQuery Struct column, the column
should be flattened using "." as the delimiter. Only columns with no child
should have a transformation.
If an input column has no transformations on it, such a column is
ignored by the training, except for the targetColumn, which should have
no transformations defined on it.
Only one of column_transformations or column_specs should be passed. If none
of column_transformations or column_specs is passed, the local credentials
being used will try setting column_specs to "auto". To do this, the local
credentials require read access to the GCS or BigQuery training data source.
column_transformations (List[Dict[str, Dict[str, str]]]):
Optional. Transformations to apply to the input columns (i.e. columns other
than the targetColumn). Each transformation may produce multiple
result values from the column's value, and all are used for training.
When creating transformation for BigQuery Struct column, the column
should be flattened using "." as the delimiter. Only columns with no child
should have a transformation.
If an input column has no transformations on it, such a column is
ignored by the training, except for the targetColumn, which should have
no transformations defined on it.
Only one of column_transformations or column_specs should be passed.
Consider using column_specs as column_transformations will be deprecated
eventually. If none of column_transformations or column_specs is passed,
the local credentials being used will try setting column_transformations to
"auto". To do this, the local credentials require read access to the GCS or
BigQuery training data source.
optimization_objective_recall_value (float):
Optional. Required when maximize-precision-at-recall optimizationObjective was
picked, represents the recall value at which the optimization is done.
The minimum value is 0 and the maximum is 1.0.
optimization_objective_precision_value (float):
Optional. Required when maximize-recall-at-precision optimizationObjective was
picked, represents the precision value at which the optimization is
done.
The minimum value is 0 and the maximum is 1.0.
project (str):
Optional. Project to run training in. Overrides project set in aiplatform.init.
location (str):
Optional. Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
Raises:
ValueError: If both column_transformations and column_specs were provided.
"""
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
self._column_transformations = (
column_transformations_utils.validate_and_get_column_transformations(
column_specs, column_transformations
)
)
self._optimization_objective = optimization_objective
self._optimization_prediction_type = optimization_prediction_type
self._optimization_objective_recall_value = optimization_objective_recall_value
self._optimization_objective_precision_value = (
optimization_objective_precision_value
)
self._additional_experiments = []
def run(
self,
dataset: datasets.TabularDataset,
target_column: str,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
weight_column: Optional[str] = None,
budget_milli_node_hours: int = 1000,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
disable_early_stopping: bool = False,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
export_evaluated_data_items_override_destination: bool = False,
additional_experiments: Optional[List[str]] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; together they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Predefined splits:
Assigns input data to training, validation, and test sets based on the value of a provided key.
If using predefined splits, ``predefined_split_column_name`` must be provided.
Supported only for tabular Datasets.
Timestamp splits:
Assigns input data to training, validation, and test sets
based on the provided timestamps. The youngest data pieces are
assigned to the training set, the next to the validation set, and the oldest
to the test set.
Supported only for tabular Datasets.
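For example (illustrative values), a 70/15/15 split can be requested by passing
``training_fraction_split=0.7``, ``validation_fraction_split=0.15`` and
``test_fraction_split=0.15`` to this method.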
Args:
dataset (datasets.TabularDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use a schema compatible with the Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For tabular Datasets, all their data is exported to
training, to pick and choose from.
target_column (str):
Required. The name of the column whose values the Model is to predict.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
This parameter must be used with training_fraction_split, validation_fraction_split and test_fraction_split.
weight_column (str):
Optional. Name of the column that should be used as the weight column.
Higher values in this column give more importance to the row
during Model training. The column must have numeric values between 0 and
10000 inclusively, and 0 value means that the row is ignored.
If the weight column field is not set, then all rows are assumed to have
equal weight of 1.
budget_milli_node_hours (int):
Optional. The train budget of creating this Model, expressed in milli node
hours, i.e. a value of 1,000 in this field means 1 node hour.
The training cost of the model will not exceed this budget. The final
cost will be attempted to be close to the budget, though may end up
being (even) noticeably smaller - at the backend's discretion. This
especially may happen when further model training ceases to provide
any improvements.
If the budget is set to a value known to be insufficient to train a
Model for the given training set, the training won't be attempted and
will error.
The minimum value is 1000 and the maximum is 72000.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model produced by this training.
The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
disable_early_stopping (bool):
Required. If true, the entire budget is used. This disables the early stopping
feature. By default, the early stopping feature is enabled, which means
that training might stop before the entire training budget has been
used, if further training no longer brings significant improvement
to the model.
export_evaluated_data_items (bool):
Whether to export the test set predictions to a BigQuery table.
If False, then the export is not performed.
export_evaluated_data_items_bigquery_destination_uri (string):
Optional. URI of desired destination BigQuery table for exported test set predictions.
Expected format:
``bq://<project_id>:<dataset_id>:<table>``
If not specified, then results are exported to the following auto-created BigQuery
table:
``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples``
Applies only if [export_evaluated_data_items] is True.
export_evaluated_data_items_override_destination (bool):
Whether to override the contents of [export_evaluated_data_items_bigquery_destination_uri],
if the table exists, for exported test set predictions. If False, and the
table exists, then the training job will fail.
Applies only if [export_evaluated_data_items] is True and
[export_evaluated_data_items_bigquery_destination_uri] is specified.
additional_experiments (List[str]):
Optional. Additional experiment flags for the automl tables training.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
Raises:
RuntimeError: If Training job has already been run or is waiting to run.
"""
if model_display_name:
utils.validate_display_name(model_display_name)
if model_labels:
utils.validate_labels(model_labels)
if self._is_waiting_to_run():
raise RuntimeError("AutoML Tabular Training is already scheduled to run.")
if self._has_run:
raise RuntimeError("AutoML Tabular Training has already run.")
if additional_experiments:
self._add_additional_experiments(additional_experiments)
return self._run(
dataset=dataset,
target_column=target_column,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
weight_column=weight_column,
budget_milli_node_hours=budget_milli_node_hours,
model_display_name=model_display_name,
model_labels=model_labels,
disable_early_stopping=disable_early_stopping,
export_evaluated_data_items=export_evaluated_data_items,
export_evaluated_data_items_bigquery_destination_uri=export_evaluated_data_items_bigquery_destination_uri,
export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
sync=sync,
create_request_timeout=create_request_timeout,
)
@base.optional_sync()
def _run(
self,
dataset: datasets.TabularDataset,
target_column: str,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
weight_column: Optional[str] = None,
budget_milli_node_hours: int = 1000,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
disable_early_stopping: bool = False,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
export_evaluated_data_items_override_destination: bool = False,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; together they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Predefined splits:
Assigns input data to training, validation, and test sets based on the value of a provided key.
If using predefined splits, ``predefined_split_column_name`` must be provided.
Supported only for tabular Datasets.
Timestamp splits:
Assigns input data to training, validation, and test sets
based on the provided timestamps. The youngest data pieces are
assigned to the training set, the next to the validation set, and the oldest
to the test set.
Supported only for tabular Datasets.
Args:
dataset (datasets.TabularDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use a schema compatible with the Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For tabular Datasets, all their data is exported to
training, to pick and choose from.
target_column (str):
Required. The name of the column whose values the Model is to predict.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
This parameter must be used with training_fraction_split, validation_fraction_split and test_fraction_split.
weight_column (str):
Optional. Name of the column that should be used as the weight column.
Higher values in this column give more importance to the row
during Model training. The column must have numeric values between 0 and
10000 inclusively, and 0 value means that the row is ignored.
If the weight column field is not set, then all rows are assumed to have
equal weight of 1.
budget_milli_node_hours (int):
Optional. The train budget of creating this Model, expressed in milli node
hours, i.e. a value of 1,000 in this field means 1 node hour.
The training cost of the model will not exceed this budget. The final
cost will be attempted to be close to the budget, though may end up
being (even) noticeably smaller - at the backend's discretion. This
especially may happen when further model training ceases to provide
any improvements.
If the budget is set to a value known to be insufficient to train a
Model for the given training set, the training won't be attempted and
will error.
The minimum value is 1000 and the maximum is 72000.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model produced by this training.
The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
disable_early_stopping (bool):
Required. If true, the entire budget is used. This disables the early stopping
feature. By default, the early stopping feature is enabled, which means
that training might stop before the entire training budget has been
used, if further training no longer brings significant improvement
to the model.
export_evaluated_data_items (bool):
Whether to export the test set predictions to a BigQuery table.
If False, then the export is not performed.
export_evaluated_data_items_bigquery_destination_uri (string):
Optional. URI of desired destination BigQuery table for exported test set predictions.
Expected format:
``bq://<project_id>:<dataset_id>:<table>``
If not specified, then results are exported to the following auto-created BigQuery
table:
``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples``
Applies only if [export_evaluated_data_items] is True.
export_evaluated_data_items_override_destination (bool):
Whether to override the contents of [export_evaluated_data_items_bigquery_destination_uri],
if the table exists, for exported test set predictions. If False, and the
table exists, then the training job will fail.
Applies only if [export_evaluated_data_items] is True and
[export_evaluated_data_items_bigquery_destination_uri] is specified.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
training_task_definition = schema.training_job.definition.automl_tabular
# auto-populate transformations
if self._column_transformations is None:
_LOGGER.info(
"No column transformations provided, so now retrieving columns from dataset in order to set default column transformations."
)
(
self._column_transformations,
column_names,
) = column_transformations_utils.get_default_column_transformations(
dataset=dataset, target_column=target_column
)
_LOGGER.info(
"The column transformation of type 'auto' was set for the following columns: %s."
% column_names
)
training_task_inputs_dict = {
# required inputs
"targetColumn": target_column,
"transformations": self._column_transformations,
"trainBudgetMilliNodeHours": budget_milli_node_hours,
# optional inputs
"weightColumnName": weight_column,
"disableEarlyStopping": disable_early_stopping,
"optimizationObjective": self._optimization_objective,
"predictionType": self._optimization_prediction_type,
"optimizationObjectiveRecallValue": self._optimization_objective_recall_value,
"optimizationObjectivePrecisionValue": self._optimization_objective_precision_value,
}
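# Ensure the BigQuery export destination, if provided, carries the bq:// scheme.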
final_export_eval_bq_uri = export_evaluated_data_items_bigquery_destination_uri
if final_export_eval_bq_uri and not final_export_eval_bq_uri.startswith(
"bq://"
):
final_export_eval_bq_uri = f"bq://{final_export_eval_bq_uri}"
if export_evaluated_data_items:
training_task_inputs_dict["exportEvaluatedDataItemsConfig"] = {
"destinationBigqueryUri": final_export_eval_bq_uri,
"overrideExistingTable": export_evaluated_data_items_override_destination,
}
if self._additional_experiments:
training_task_inputs_dict[
"additionalExperiments"
] = self._additional_experiments
model = gca_model.Model(
display_name=model_display_name or self._display_name,
labels=model_labels or self._labels,
encryption_spec=self._model_encryption_spec,
)
return self._run_job(
training_task_definition=training_task_definition,
training_task_inputs=training_task_inputs_dict,
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
model=model,
create_request_timeout=create_request_timeout,
)
@property
def _model_upload_fail_string(self) -> str:
"""Helper property for model upload failure."""
return (
f"Training Pipeline {self.resource_name} is not configured to upload a "
"Model."
)
def _add_additional_experiments(self, additional_experiments: List[str]):
"""Add experiment flags to the training job.
Args:
additional_experiments (List[str]):
Experiment flags that can enable some experimental training features.
"""
self._additional_experiments.extend(additional_experiments)
@staticmethod
def get_auto_column_specs(
dataset: datasets.TabularDataset,
target_column: str,
) -> Dict[str, str]:
"""Returns a dict with all non-target columns as keys and 'auto' as values.
Example usage:
column_specs = training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
dataset=my_dataset,
target_column="my_target_column",
)
Args:
dataset (datasets.TabularDataset):
Required. Intended dataset.
target_column(str):
Required. Intended target column.
Returns:
Dict[str, str]
Column names as keys and 'auto' as values
"""
column_names = [
column for column in dataset.column_names if column != target_column
]
column_specs = {column: "auto" for column in column_names}
return column_specs
class column_data_types:
AUTO = "auto"
NUMERIC = "numeric"
CATEGORICAL = "categorical"
TIMESTAMP = "timestamp"
TEXT = "text"
REPEATED_NUMERIC = "repeated_numeric"
REPEATED_CATEGORICAL = "repeated_categorical"
REPEATED_TEXT = "repeated_text"
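# A minimal illustration (hypothetical column names) of using the constants
# above together with column_specs:
#   column_specs = {
#       "age": AutoMLTabularTrainingJob.column_data_types.NUMERIC,
#       "city": AutoMLTabularTrainingJob.column_data_types.CATEGORICAL,
#   }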
class AutoMLForecastingTrainingJob(_TrainingJob):
_supported_training_schemas = (schema.training_job.definition.automl_forecasting,)
def __init__(
self,
display_name: str,
optimization_objective: Optional[str] = None,
column_specs: Optional[Dict[str, str]] = None,
column_transformations: Optional[List[Dict[str, Dict[str, str]]]] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
):
"""Constructs a AutoML Forecasting Training Job.
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
optimization_objective (str):
Optional. Objective function the model is to be optimized towards.
The training process creates a Model that optimizes the value of the objective
function over the validation set. The supported optimization objectives:
"minimize-rmse" (default) - Minimize root-mean-squared error (RMSE).
"minimize-mae" - Minimize mean-absolute error (MAE).
"minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
"minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE).
"minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE)
and mean-absolute-error (MAE).
"minimize-quantile-loss" - Minimize the quantile loss at the defined quantiles.
(Set this objective to build quantile forecasts.)
column_specs (Dict[str, str]):
Optional. Alternative to column_transformations where the keys of the dict
are column names and their respective values are one of
AutoMLTabularTrainingJob.column_data_types.
When creating transformation for BigQuery Struct column, the column
should be flattened using "." as the delimiter. Only columns with no child
should have a transformation.
If an input column has no transformations on it, such a column is
ignored by the training, except for the targetColumn, which should have
no transformations defined on it.
Only one of column_transformations or column_specs should be passed.
column_transformations (List[Dict[str, Dict[str, str]]]):
Optional. Transformations to apply to the input columns (i.e. columns other
than the targetColumn). Each transformation may produce multiple
result values from the column's value, and all are used for training.
When creating transformation for BigQuery Struct column, the column
should be flattened using "." as the delimiter. Only columns with no child
should have a transformation.
If an input column has no transformations on it, such a column is
ignored by the training, except for the targetColumn, which should have
no transformations defined on it.
Only one of column_transformations or column_specs should be passed.
Consider using column_specs as column_transformations will be deprecated eventually.
project (str):
Optional. Project to run training in. Overrides project set in aiplatform.init.
location (str):
Optional. Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
Raises:
ValueError: If both column_transformations and column_specs were provided.
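Example usage (illustrative; all values are placeholders):
job = training_jobs.AutoMLForecastingTrainingJob(
display_name="my_display_name",
optimization_objective="minimize-rmse",
column_specs={"column_1": "auto", "column_2": "numeric"},
labels={'key': 'value'},
)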
"""
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
self._column_transformations = (
column_transformations_utils.validate_and_get_column_transformations(
column_specs, column_transformations
)
)
self._optimization_objective = optimization_objective
self._additional_experiments = []
def run(
self,
dataset: datasets.TimeSeriesDataset,
target_column: str,
time_column: str,
time_series_identifier_column: str,
unavailable_at_forecast_columns: List[str],
available_at_forecast_columns: List[str],
forecast_horizon: int,
data_granularity_unit: str,
data_granularity_count: int,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
predefined_split_column_name: Optional[str] = None,
weight_column: Optional[str] = None,
time_series_attribute_columns: Optional[List[str]] = None,
context_window: Optional[int] = None,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
export_evaluated_data_items_override_destination: bool = False,
quantiles: Optional[List[float]] = None,
validation_options: Optional[str] = None,
budget_milli_node_hours: int = 1000,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
additional_experiments: Optional[List[str]] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; together they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Predefined splits:
Assigns input data to training, validation, and test sets based on the value of a provided key.
If using predefined splits, ``predefined_split_column_name`` must be provided.
Supported only for tabular Datasets.
Timestamp splits:
Assigns input data to training, validation, and test sets
based on the provided timestamps. The youngest data pieces are
assigned to the training set, the next to the validation set, and the oldest
to the test set.
Supported only for tabular Datasets.
Args:
dataset (datasets.TimeSeriesDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use a schema compatible with the Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For time series Datasets, all their data is exported to
training, to pick and choose from.
target_column (str):
Required. Name of the column that the Model is to predict values for. This
column must be unavailable at forecast.
time_column (str):
Required. Name of the column that identifies time order in the time series.
This column must be available at forecast.
time_series_identifier_column (str):
Required. Name of the column that identifies the time series.
unavailable_at_forecast_columns (List[str]):
Required. Column names of columns that are unavailable at forecast.
Each column contains information for the given entity (identified by the
[time_series_identifier_column]) that is unknown before the forecast
(e.g. population of a city in a given year, or weather on a given day).
available_at_forecast_columns (List[str]):
Required. Column names of columns that are available at forecast.
Each column contains information for the given entity (identified by the
[time_series_identifier_column]) that is known at forecast.
forecast_horizon (int):
Required. The amount of time into the future for which forecasted values for the target are
returned. Expressed in number of units defined by the [data_granularity_unit] and
[data_granularity_count] field. Inclusive.
data_granularity_unit (str):
Required. The data granularity unit. Accepted values are ``minute``,
``hour``, ``day``, ``week``, ``month``, ``year``.
data_granularity_count (int):
Required. The number of data granularity units between data points in the training
data. If [data_granularity_unit] is `minute`, can be 1, 5, 10, 15, or 30. For all other
values of [data_granularity_unit], must be 1.
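For example (illustrative), ``data_granularity_unit="day"``,
``data_granularity_count=1`` and ``forecast_horizon=30`` request forecasts for
30 daily steps into the future.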
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``TRAIN``,
``VALIDATE``, ``TEST``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
weight_column (str):
Optional. Name of the column that should be used as the weight column.
Higher values in this column give more importance to the row
during Model training. The column must have numeric values between 0 and
10000 inclusively, and 0 value means that the row is ignored.
If the weight column field is not set, then all rows are assumed to have
equal weight of 1. This column must be available at forecast.
time_series_attribute_columns (List[str]):
Optional. Column names that should be used as attribute columns.
Each column is constant within a time series.
context_window (int):
Optional. The amount of time into the past training and prediction data is used for
model training and prediction respectively. Expressed in number of units defined by the
[data_granularity_unit] and [data_granularity_count] fields. When not provided uses the
default value of 0 which means the model sets each series context window to be 0 (also
known as "cold start"). Inclusive.
export_evaluated_data_items (bool):
Whether to export the test set predictions to a BigQuery table.
If False, then the export is not performed.
export_evaluated_data_items_bigquery_destination_uri (string):
Optional. URI of desired destination BigQuery table for exported test set predictions.
Expected format:
``bq://<project_id>:<dataset_id>:<table>``
If not specified, then results are exported to the following auto-created BigQuery
table:
``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples``
Applies only if [export_evaluated_data_items] is True.
export_evaluated_data_items_override_destination (bool):
Whether to override the contents of [export_evaluated_data_items_bigquery_destination_uri],
if the table exists, for exported test set predictions. If False, and the
table exists, then the training job will fail.
Applies only if [export_evaluated_data_items] is True and
[export_evaluated_data_items_bigquery_destination_uri] is specified.
quantiles (List[float]):
Quantiles to use for the `minimize-quantile-loss`
[AutoMLForecastingTrainingJob.optimization_objective]. This argument is required in
this case.
Accepts up to 5 quantiles in the form of a double from 0 to 1, exclusive.
Each quantile must be unique.
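For example (illustrative): ``quantiles=[0.1, 0.5, 0.9]``.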
validation_options (str):
Validation options for the data validation component. The available options are:
"fail-pipeline" - (default) runs the data validation and fails the pipeline
if validation fails.
"ignore-validation" - ignore the results of the validation and continue the pipeline
budget_milli_node_hours (int):
Optional. The train budget of creating this Model, expressed in milli node
hours, i.e. a value of 1,000 in this field means 1 node hour.
The training cost of the model will not exceed this budget. The final
cost will be attempted to be close to the budget, though may end up
being (even) noticeably smaller - at the backend's discretion. This
especially may happen when further model training ceases to provide
any improvements.
If the budget is set to a value known to be insufficient to train a
Model for the given training set, the training won't be attempted and
will error.
The minimum value is 1000 and the maximum is 72000.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model produced by this training.
The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
additional_experiments (List[str]):
Optional. Additional experiment flags for the time series forecasting training.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
Raises:
RuntimeError: If Training job has already been run or is waiting to run.
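Example:
A minimal, illustrative sketch of calling this method. The dataset object,
column names, horizon, and granularity values below are placeholders, not
defaults:
model = my_forecasting_job.run(
dataset=my_time_series_dataset,
target_column="sales",
time_column="date",
time_series_identifier_column="store_id",
unavailable_at_forecast_columns=["sales"],
available_at_forecast_columns=["date", "promo"],
forecast_horizon=30,
data_granularity_unit="day",
data_granularity_count=1,
)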
"""
if model_display_name:
utils.validate_display_name(model_display_name)
if model_labels:
utils.validate_labels(model_labels)
if self._is_waiting_to_run():
raise RuntimeError(
"AutoML Forecasting Training is already scheduled to run."
)
if self._has_run:
raise RuntimeError("AutoML Forecasting Training has already run.")
if additional_experiments:
self._add_additional_experiments(additional_experiments)
return self._run(
dataset=dataset,
target_column=target_column,
time_column=time_column,
time_series_identifier_column=time_series_identifier_column,
unavailable_at_forecast_columns=unavailable_at_forecast_columns,
available_at_forecast_columns=available_at_forecast_columns,
forecast_horizon=forecast_horizon,
data_granularity_unit=data_granularity_unit,
data_granularity_count=data_granularity_count,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
predefined_split_column_name=predefined_split_column_name,
weight_column=weight_column,
time_series_attribute_columns=time_series_attribute_columns,
context_window=context_window,
budget_milli_node_hours=budget_milli_node_hours,
export_evaluated_data_items=export_evaluated_data_items,
export_evaluated_data_items_bigquery_destination_uri=export_evaluated_data_items_bigquery_destination_uri,
export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
quantiles=quantiles,
validation_options=validation_options,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
create_request_timeout=create_request_timeout,
)
@base.optional_sync()
def _run(
self,
dataset: datasets.TimeSeriesDataset,
target_column: str,
time_column: str,
time_series_identifier_column: str,
unavailable_at_forecast_columns: List[str],
available_at_forecast_columns: List[str],
forecast_horizon: int,
data_granularity_unit: str,
data_granularity_count: int,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
predefined_split_column_name: Optional[str] = None,
weight_column: Optional[str] = None,
time_series_attribute_columns: Optional[List[str]] = None,
context_window: Optional[int] = None,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
export_evaluated_data_items_override_destination: bool = False,
quantiles: Optional[List[float]] = None,
validation_options: Optional[str] = None,
budget_milli_node_hours: int = 1000,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Predefined splits:
Assigns input data to training, validation, and test sets based on the value of a provided key.
If using predefined splits, ``predefined_split_column_name`` must be provided.
Supported only for tabular Datasets.
Timestamp splits:
Assigns input data to training, validation, and test sets
based on provided timestamps. The youngest data pieces are
assigned to training set, next to validation set, and the oldest
to the test set.
Supported only for tabular Datasets.
Args:
dataset (datasets.TimeSeriesDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For time series Datasets, all their data is exported to
training, to pick and choose from.
target_column (str):
Required. Name of the column that the Model is to predict values for. This
column must be unavailable at forecast.
time_column (str):
Required. Name of the column that identifies time order in the time series.
This column must be available at forecast.
time_series_identifier_column (str):
Required. Name of the column that identifies the time series.
unavailable_at_forecast_columns (List[str]):
Required. Column names of columns that are unavailable at forecast.
Each column contains information for the given entity (identified by the
[time_series_identifier_column]) that is unknown before the forecast
(e.g. population of a city in a given year, or weather on a given day).
available_at_forecast_columns (List[str]):
Required. Column names of columns that are available at forecast.
Each column contains information for the given entity (identified by the
[time_series_identifier_column]) that is known at forecast.
forecast_horizon (int):
Required. The amount of time into the future for which forecasted values for the target are
returned. Expressed in number of units defined by the [data_granularity_unit] and
[data_granularity_count] field. Inclusive.
data_granularity_unit (str):
Required. The data granularity unit. Accepted values are ``minute``,
``hour``, ``day``, ``week``, ``month``, ``year``.
data_granularity_count (int):
Required. The number of data granularity units between data points in the training
data. If [data_granularity_unit] is `minute`, can be 1, 5, 10, 15, or 30. For all other
values of [data_granularity_unit], must be 1.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
weight_column (str):
Optional. Name of the column that should be used as the weight column.
Higher values in this column give more importance to the row
during Model training. The column must have numeric values between 0 and
10000 inclusively, and 0 value means that the row is ignored.
If the weight column field is not set, then all rows are assumed to have
equal weight of 1. This column must be available at forecast.
time_series_attribute_columns (List[str]):
Optional. Column names that should be used as attribute columns.
Each column is constant within a time series.
context_window (int):
Optional. The number of periods offset into the past to restrict the past sequence, where each
period is one unit of granularity as defined by [data_granularity_unit]. When not provided uses the
default value of 0 which means the model sets each series historical window to be 0 (also
known as "cold start"). Inclusive.
export_evaluated_data_items (bool):
Whether to export the test set predictions to a BigQuery table.
If False, then the export is not performed.
export_evaluated_data_items_bigquery_destination_uri (string):
Optional. URI of desired destination BigQuery table for exported test set predictions.
Expected format:
``bq://<project_id>:<dataset_id>:<table>``
If not specified, then results are exported to the following auto-created BigQuery
table:
``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples``
Applies only if [export_evaluated_data_items] is True.
export_evaluated_data_items_override_destination (bool):
Whether to override the contents of [export_evaluated_data_items_bigquery_destination_uri],
if the table exists, for exported test set predictions. If False, and the
table exists, then the training job will fail.
Applies only if [export_evaluated_data_items] is True and
[export_evaluated_data_items_bigquery_destination_uri] is specified.
quantiles (List[float]):
Quantiles to use for the `minimize-quantile-loss`
[AutoMLForecastingTrainingJob.optimization_objective]. This argument is required in
this case.
Accepts up to 5 quantiles in the form of a double from 0 to 1, exclusive.
Each quantile must be unique.
validation_options (str):
Validation options for the data validation component. The available options are:
"fail-pipeline" - (default) fail the pipeline if the data validation fails.
"ignore-validation" - ignore the results of the data validation and continue the pipeline.
budget_milli_node_hours (int):
Optional. The train budget of creating this Model, expressed in milli node
hours i.e. 1,000 value in this field means 1 node hour.
The training cost of the model will not exceed this budget. The final
cost will be attempted to be close to the budget, though may end up
being (even) noticeably smaller - at the backend's discretion. This
especially may happen when further model training ceases to provide
any improvements.
If the budget is set to a value known to be insufficient to train a
Model for the given training set, the training won't be attempted and
will error.
The minimum value is 1000 and the maximum is 72000.
model_display_name (str):
Optional. If the training produces a managed Vertex AI Model, the display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
training_task_definition = schema.training_job.definition.automl_forecasting
# auto-populate transformations
if self._column_transformations is None:
_LOGGER.info(
"No column transformations provided, so now retrieving columns from dataset in order to set default column transformations."
)
(
self._column_transformations,
column_names,
) = dataset._get_default_column_transformations(target_column)
_LOGGER.info(
"The column transformation of type 'auto' was set for the following columns: %s."
% column_names
)
training_task_inputs_dict = {
# required inputs
"targetColumn": target_column,
"timeColumn": time_column,
"timeSeriesIdentifierColumn": time_series_identifier_column,
"timeSeriesAttributeColumns": time_series_attribute_columns,
"unavailableAtForecastColumns": unavailable_at_forecast_columns,
"availableAtForecastColumns": available_at_forecast_columns,
"forecastHorizon": forecast_horizon,
"dataGranularity": {
"unit": data_granularity_unit,
"quantity": data_granularity_count,
},
"transformations": self._column_transformations,
"trainBudgetMilliNodeHours": budget_milli_node_hours,
# optional inputs
"weightColumn": weight_column,
"contextWindow": context_window,
"quantiles": quantiles,
"validationOptions": validation_options,
"optimizationObjective": self._optimization_objective,
}
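# Illustrative only: with the assumed values data_granularity_unit="day" and
# data_granularity_count=1, the "dataGranularity" entry above would be
# {"unit": "day", "quantity": 1}.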
final_export_eval_bq_uri = export_evaluated_data_items_bigquery_destination_uri
if final_export_eval_bq_uri and not final_export_eval_bq_uri.startswith(
"bq://"
):
final_export_eval_bq_uri = f"bq://{final_export_eval_bq_uri}"
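# Illustrative only: an assumed value of "my-project:my_dataset.eval_table"
# would be normalized to "bq://my-project:my_dataset.eval_table".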
if export_evaluated_data_items:
training_task_inputs_dict["exportEvaluatedDataItemsConfig"] = {
"destinationBigqueryUri": final_export_eval_bq_uri,
"overrideExistingTable": export_evaluated_data_items_override_destination,
}
if self._additional_experiments:
training_task_inputs_dict[
"additionalExperiments"
] = self._additional_experiments
model = gca_model.Model(
display_name=model_display_name or self._display_name,
labels=model_labels or self._labels,
encryption_spec=self._model_encryption_spec,
)
new_model = self._run_job(
training_task_definition=training_task_definition,
training_task_inputs=training_task_inputs_dict,
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=None, # Not supported by AutoMLForecasting
model=model,
create_request_timeout=create_request_timeout,
)
if export_evaluated_data_items:
_LOGGER.info(
"Exported examples available at:\n%s"
% self.evaluated_data_items_bigquery_uri
)
return new_model
@property
def _model_upload_fail_string(self) -> str:
"""Helper property for model upload failure."""
return (
f"Training Pipeline {self.resource_name} is not configured to upload a "
"Model."
)
@property
def evaluated_data_items_bigquery_uri(self) -> Optional[str]:
"""BigQuery location of exported evaluated examples from the Training Job
Returns:
str: BigQuery uri for the exported evaluated examples if the export
feature is enabled for training.
None: If the export feature was not enabled for training.
"""
self._assert_gca_resource_is_available()
metadata = self._gca_resource.training_task_metadata
if metadata and "evaluatedDataItemsBigqueryUri" in metadata:
return metadata["evaluatedDataItemsBigqueryUri"]
return None
def _add_additional_experiments(self, additional_experiments: List[str]):
"""Add experiment flags to the training job.
Args:
additional_experiments (List[str]):
Experiment flags that can enable some experimental training features.
"""
self._additional_experiments.extend(additional_experiments)
class AutoMLImageTrainingJob(_TrainingJob):
_supported_training_schemas = (
schema.training_job.definition.automl_image_classification,
schema.training_job.definition.automl_image_object_detection,
)
def __init__(
self,
display_name: str,
prediction_type: str = "classification",
multi_label: bool = False,
model_type: str = "CLOUD",
base_model: Optional[models.Model] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
):
"""Constructs an AutoML Image Training Job.
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
prediction_type (str):
The type of prediction the Model is to produce, one of:
"classification" - One out of multiple target values is
predicted for each image.
"object_detection" - Bounding boxes and corresponding labels are
predicted for the objects that appear in each image.
multi_label: bool = False
Required. Default is False.
If false, a single-label (multi-class) Model will be trained
(i.e. assuming that for each image just up to one annotation may be
applicable). If true, a multi-label Model will be trained (i.e.
assuming that for each image multiple annotations may be applicable).
This is only applicable for the "classification" prediction_type and
will be ignored otherwise.
model_type: str = "CLOUD"
Required. One of the following:
"CLOUD" - Default for Image Classification.
A Model best tailored to be used within Google Cloud, and
which cannot be exported.
"CLOUD_HIGH_ACCURACY_1" - Default for Image Object Detection.
A model best tailored to be used within Google Cloud, and
which cannot be exported. Expected to have a higher latency,
but should also have a higher prediction quality than other
cloud models.
"CLOUD_LOW_LATENCY_1" - A model best tailored to be used within
Google Cloud, and which cannot be exported. Expected to have a
low latency, but may have lower prediction quality than other
cloud models.
"MOBILE_TF_LOW_LATENCY_1" - A model that, in addition to being
available within Google Cloud, can also be exported as TensorFlow
or Core ML model and used on a mobile or edge device afterwards.
Expected to have low latency, but may have lower prediction
quality than other mobile models.
"MOBILE_TF_VERSATILE_1" - A model that, in addition to being
available within Google Cloud, can also be exported as TensorFlow
or Core ML model and used on a mobile or edge device afterwards.
"MOBILE_TF_HIGH_ACCURACY_1" - A model that, in addition to being
available within Google Cloud, can also be exported as TensorFlow
or Core ML model and used on a mobile or edge device afterwards.
Expected to have a higher latency, but should also have a higher
prediction quality than other mobile models.
base_model: Optional[models.Model] = None
Optional. Only permitted for Image Classification models.
If it is specified, the new model will be trained based on the `base` model.
Otherwise, the new model will be trained from scratch. The `base` model
must be in the same Project and Location as the new Model to train,
and have the same model_type.
project (str):
Optional. Project to run training in. Overrides project set in aiplatform.init.
location (str):
Optional. Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
Raises:
ValueError: When an invalid prediction_type or model_type is provided.
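Example (an illustrative sketch; the display name is a placeholder):
job = AutoMLImageTrainingJob(
display_name='my-image-classification-job',
prediction_type='classification',
multi_label=False,
model_type='CLOUD',
)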
"""
valid_model_types = constants.AUTOML_IMAGE_PREDICTION_MODEL_TYPES.get(
prediction_type, None
)
if not valid_model_types:
raise ValueError(
f"'{prediction_type}' is not a supported prediction type for AutoML Image Training. "
f"Please choose one of: {tuple(constants.AUTOML_IMAGE_PREDICTION_MODEL_TYPES.keys())}."
)
# Override default model_type for object_detection
if model_type == "CLOUD" and prediction_type == "object_detection":
model_type = "CLOUD_HIGH_ACCURACY_1"
if model_type not in valid_model_types:
raise ValueError(
f"'{model_type}' is not a supported model_type for prediction_type of '{prediction_type}'. "
f"Please choose one of: {tuple(valid_model_types)}"
)
if base_model and prediction_type != "classification":
raise ValueError(
"Training with a `base_model` is only supported in AutoML Image Classification. "
f"However '{prediction_type}' was provided as `prediction_type`."
)
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
self._model_type = model_type
self._prediction_type = prediction_type
self._multi_label = multi_label
self._base_model = base_model
def run(
self,
dataset: datasets.ImageDataset,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
budget_milli_node_hours: Optional[int] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
disable_early_stopping: bool = False,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the AutoML Image training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters; data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Args:
dataset (datasets.ImageDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For tabular Datasets, all their data is exported to
training, to pick and choose from.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
budget_milli_node_hours (int):
Optional. The train budget of creating this Model, expressed in milli node
hours i.e. 1,000 value in this field means 1 node hour.
Defaults by `prediction_type`:
`classification` - For Cloud models the budget must be: 8,000 - 800,000
milli node hours (inclusive). The default value is 192,000 which
represents one day in wall time, assuming 8 nodes are used.
`object_detection` - For Cloud models the budget must be: 20,000 - 900,000
milli node hours (inclusive). The default value is 216,000 which represents
one day in wall time, assuming 9 nodes are used.
The training cost of the model will not exceed this budget. The final
cost will be attempted to be close to the budget, though may end up
being (even) noticeably smaller - at the backend's discretion. This
especially may happen when further model training ceases to provide
any improvements. If the budget is set to a value known to be insufficient to
train a Model for the given training set, the training won't be attempted and
will error.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model. The name
can be up to 128 characters long and can consist of any UTF-8
characters. If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
disable_early_stopping: bool = False
Required. If true, the entire budget is used. This disables the early stopping
feature. By default, the early stopping feature is enabled, which means
that training might stop before the entire training budget has been
used, if further training no longer brings significant improvement
to the model.
sync: bool = True
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
Raises:
RuntimeError: If Training job has already been run or is waiting to run.
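Example:
A minimal, illustrative sketch of calling this method; the dataset object,
budget, and display name below are placeholders:
model = job.run(
dataset=my_image_dataset,
budget_milli_node_hours=8000,
model_display_name='my-image-model',
)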
"""
if model_display_name:
utils.validate_display_name(model_display_name)
if model_labels:
utils.validate_labels(model_labels)
if self._is_waiting_to_run():
raise RuntimeError("AutoML Image Training is already scheduled to run.")
if self._has_run:
raise RuntimeError("AutoML Image Training has already run.")
return self._run(
dataset=dataset,
base_model=self._base_model,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
budget_milli_node_hours=budget_milli_node_hours,
model_display_name=model_display_name,
model_labels=model_labels,
disable_early_stopping=disable_early_stopping,
sync=sync,
create_request_timeout=create_request_timeout,
)
@base.optional_sync()
def _run(
self,
dataset: datasets.ImageDataset,
base_model: Optional[models.Model] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
budget_milli_node_hours: int = 1000,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
disable_early_stopping: bool = False,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters; data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Args:
dataset (datasets.ImageDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For tabular Datasets, all their data is exported to
training, to pick and choose from.
base_model: Optional[models.Model] = None
Optional. Only permitted for Image Classification models.
If it is specified, the new model will be trained based on the `base` model.
Otherwise, the new model will be trained from scratch. The `base` model
must be in the same Project and Location as the new Model to train,
and have the same model_type.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
budget_milli_node_hours (int):
Optional. The train budget of creating this Model, expressed in milli node
hours i.e. 1,000 value in this field means 1 node hour.
The training cost of the model will not exceed this budget. The final
cost will be attempted to be close to the budget, though may end up
being (even) noticeably smaller - at the backend's discretion. This
especially may happen when further model training ceases to provide
any improvements.
If the budget is set to a value known to be insufficient to train a
Model for the given training set, the training won't be attempted and
will error.
The minimum value is 1000 and the maximum is 72000.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model. The name
can be up to 128 characters long and can consist of any UTF-8
characters. If a `base_model` was provided, the display_name in the
base_model will be overwritten with this value. If not provided upon
creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
disable_early_stopping (bool):
Required. If true, the entire budget is used. This disables the early stopping
feature. By default, the early stopping feature is enabled, which means
that training might stop before the entire training budget has been
used, if further training no longer brings significant improvement
to the model.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
# Retrieve the objective-specific training task schema based on prediction_type
training_task_definition = getattr(
schema.training_job.definition, f"automl_image_{self._prediction_type}"
)
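# Illustrative only: a prediction_type of "classification" resolves to
# schema.training_job.definition.automl_image_classification.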
training_task_inputs_dict = {
# required inputs
"modelType": self._model_type,
"budgetMilliNodeHours": budget_milli_node_hours,
# optional inputs
"disableEarlyStopping": disable_early_stopping,
}
if self._prediction_type == "classification":
training_task_inputs_dict["multiLabel"] = self._multi_label
# gca Model to be trained
model_tbt = gca_model.Model(encryption_spec=self._model_encryption_spec)
model_tbt.display_name = model_display_name or self._display_name
model_tbt.labels = model_labels or self._labels
if base_model:
# Use provided base_model to pass to model_to_upload causing the
# description and labels from base_model to be passed onto the new model
model_tbt.description = getattr(base_model._gca_resource, "description")
model_tbt.labels = getattr(base_model._gca_resource, "labels")
# Set ID of Vertex AI Model to base this training job off of
training_task_inputs_dict["baseModelId"] = base_model.name
return self._run_job(
training_task_definition=training_task_definition,
training_task_inputs=training_task_inputs_dict,
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
model=model_tbt,
create_request_timeout=create_request_timeout,
)
@property
def _model_upload_fail_string(self) -> str:
"""Helper property for model upload failure."""
return (
f"AutoML Image Training Pipeline {self.resource_name} is not "
"configured to upload a Model."
)
class CustomPythonPackageTrainingJob(_CustomTrainingJob):
"""Class to launch a Custom Training Job in Vertex AI using a Python
Package.
Takes a training implementation as a python package and executes
that package in Cloud Vertex AI Training.
"""
def __init__(
self,
display_name: str,
python_package_gcs_uri: str,
python_module_name: str,
container_uri: str,
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
):
"""Constructs a Custom Training Job from a Python Package.
job = aiplatform.CustomPythonPackageTrainingJob(
display_name='test-train',
python_package_gcs_uri='gs://my-bucket/my-python-package.tar.gz',
python_module_name='my_training_python_package.task',
container_uri='gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest',
model_serving_container_image_uri='gcr.io/my-trainer/serving:1',
model_serving_container_predict_route='predict',
model_serving_container_health_route='metadata',
labels={'key': 'value'},
)
Usage with Dataset:
ds = aiplatform.TabularDataset(
'projects/my-project/locations/us-central1/datasets/12345'
)
job.run(
ds,
replica_count=1,
model_display_name='my-trained-model',
model_labels={'key': 'value'},
)
Usage without Dataset:
job.run(
replica_count=1,
model_display_name='my-trained-model',
model_labels={'key': 'value'},
)
To ensure your model gets saved in Vertex AI, write your saved model to
os.environ["AIP_MODEL_DIR"] in your provided training script.
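For example, the end of a training script might look like this (an
illustrative sketch; the framework and the `model` object are assumptions,
not part of this class):
import os
model.save(os.environ["AIP_MODEL_DIR"])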
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
python_package_gcs_uri (str):
Required: GCS location of the training python package.
python_module_name (str):
Required: The module name of the training python package.
container_uri (str):
Required: Uri of the training container image in the GCR.
model_serving_container_image_uri (str):
If the training produces a managed Vertex AI Model, the URI of the
Model serving container suitable for serving the model produced by the
training script.
model_serving_container_predict_route (str):
If the training produces a managed Vertex AI Model, an HTTP path to
send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
model_serving_container_health_route (str):
If the training produces a managed Vertex AI Model, an HTTP path to
send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI
Platform.
model_serving_container_command (Sequence[str]):
The command with which the container is run. Not executed within a
shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
model_serving_container_args (Sequence[str]):
The arguments to the command. The Docker image's CMD is used if this is
not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
model_serving_container_environment_variables (Dict[str, str]):
The environment variables that are to be present in the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
model_serving_container_ports (Sequence[int]):
Declaration of ports that are exposed by the container. This field is
primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
model_description (str):
The description of the Model.
model_instance_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
model_parameters_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has read access.
model_prediction_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
project (str):
Project to run training in. Overrides project set in aiplatform.init.
location (str):
Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
staging_bucket (str):
Bucket used to stage source and training artifacts. Overrides
staging_bucket set in aiplatform.init.
"""
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
container_uri=container_uri,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_description=model_description,
staging_bucket=staging_bucket,
)
self._package_gcs_uri = python_package_gcs_uri
self._python_module = python_module_name
def run(
self,
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
] = None,
annotation_schema_uri: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
reduction_server_replica_count: int = 0,
reduction_server_machine_type: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
timeout: Optional[int] = None,
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
sync=True,
create_request_timeout: Optional[float] = None,
) -> Optional[models.Model]:
"""Runs the custom training job.
Distributed Training Support:
If replica count = 1 then one chief replica will be provisioned. If
replica_count > 1 the remainder will be provisioned as a worker replica pool.
ie: replica_count = 10 will result in 1 chief and 9 workers
All replicas have same machine_type, accelerator_type, and accelerator_count
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided; they must sum to at most 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters; data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Predefined splits:
Assigns input data to training, validation, and test sets based on the value of a provided key.
If using predefined splits, ``predefined_split_column_name`` must be provided.
Supported only for tabular Datasets.
Timestamp splits:
Assigns input data to training, validation, and test sets
based on provided timestamps. The youngest data pieces are
assigned to training set, next to validation set, and the oldest
to the test set.
Supported only for tabular Datasets.
Args:
dataset (Union[datasets.ImageDataset,datasets.TabularDataset,datasets.TextDataset,datasets.VideoDataset,]):
The Vertex AI Dataset to fit this training against. The custom training script should
retrieve the datasets through the following passed-in environment variable URIs:
os.environ["AIP_TRAINING_DATA_URI"]
os.environ["AIP_VALIDATION_DATA_URI"]
os.environ["AIP_TEST_DATA_URI"]
Additionally the dataset format is passed in as:
os.environ["AIP_DATA_FORMAT"]
annotation_schema_uri (str):
Google Cloud Storage URI that points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object) The schema files
that can be used here are found in
gs://google-cloud-aiplatform/schema/dataset/annotation/,
note that the chosen schema must be consistent with
``metadata``
of the Dataset specified by
``dataset_id``.
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
model_display_name (str):
If the script produces a managed Vertex AI Model, the display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
base_output_dir (str):
GCS output directory of job. If not provided a
timestamped directory in the staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. <base_output_dir>/logs/
service_account (str):
Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
bigquery_destination (str):
Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
args (List[Unions[str, int, float]]):
Command line arguments to be passed to the Python script.
environment_variables (Dict[str, str]):
Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The name of each environment variable must be unique.
environment_variables = {
'MY_KEY': 'MY_VALUE'
}
replica_count (int):
The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
machine_type (str):
The type of machine to use for training.
accelerator_type (str):
Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
accelerator_count (int):
The number of accelerators to attach to a worker replica.
boot_disk_type (str):
Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
boot_disk_size_gb (int):
Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
reduction_server_replica_count (int):
The number of reduction server replicas, default is 0.
reduction_server_machine_type (str):
Optional. The type of machine to use for reduction server.
reduction_server_container_uri (str):
Optional. The Uri of the reduction server container image.
See details: https://cloud.google.com/vertex-ai/docs/training/distributed-training#reduce_training_time_with_reduction_server
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
timeout (int):
The maximum job running time in seconds. The default is 7 days.
restart_job_on_worker_restart (bool):
Restarts the entire CustomJob if a worker
gets restarted. This feature can be used by
distributed training jobs that are not resilient
to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
tensorboard (str):
Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
resource to which this CustomJob will upload Tensorboard
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
The training script should write Tensorboard logs to the following Vertex AI environment
variable:
AIP_TENSORBOARD_LOG_DIR
`service_account` is required with provided `tensorboard`.
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
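Example:
    An illustrative sketch only, not a definitive recipe: `job` stands for
    an already constructed instance of this training job class, `ds` for a
    compatible Vertex AI dataset, and the bucket and service account names
    below are placeholders.

    model = job.run(
        dataset=ds,
        base_output_dir="gs://my-bucket/training-output",
        service_account="trainer@my-project.iam.gserviceaccount.com",
        replica_count=1,
        machine_type="n1-standard-4",
        accelerator_type="NVIDIA_TESLA_T4",
        accelerator_count=1,
        environment_variables={"MY_KEY": "MY_VALUE"},
        sync=True,
    )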
"""
worker_pool_specs, managed_model = self._prepare_and_validate_run(
model_display_name=model_display_name,
model_labels=model_labels,
replica_count=replica_count,
machine_type=machine_type,
accelerator_count=accelerator_count,
accelerator_type=accelerator_type,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
reduction_server_replica_count=reduction_server_replica_count,
reduction_server_machine_type=reduction_server_machine_type,
)
return self._run(
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
worker_pool_specs=worker_pool_specs,
managed_model=managed_model,
args=args,
environment_variables=environment_variables,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
bigquery_destination=bigquery_destination,
timeout=timeout,
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
reduction_server_container_uri=reduction_server_container_uri
if reduction_server_replica_count > 0
else None,
sync=sync,
create_request_timeout=create_request_timeout,
)
@base.optional_sync(construct_object_on_arg="managed_model")
def _run(
self,
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
],
annotation_schema_uri: Optional[str],
worker_pool_specs: worker_spec_utils._DistributedTrainingSpec,
managed_model: Optional[gca_model.Model] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
bigquery_destination: Optional[str] = None,
timeout: Optional[int] = None,
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
sync=True,
create_request_timeout: Optional[float] = None,
) -> Optional[models.Model]:
"""Packages local script and launches training_job.
Args:
dataset (
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
):
The Vertex AI Dataset to fit this training against.
annotation_schema_uri (str):
Google Cloud Storage URI that points to a YAML file describing
annotation schema.
worker_pool_specs (worker_spec_utils._DistributedTrainingSpec):
Worker pool specs required to run the job.
managed_model (gca_model.Model):
Model proto if this script produces a Managed Model.
args (List[Union[str, int, float]]):
Command line arguments to be passed to the Python script.
environment_variables (Dict[str, str]):
Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
environment_variables = {
'MY_KEY': 'MY_VALUE'
}
base_output_dir (str):
GCS output directory of job. If not provided a
timestamped directory in the staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. <base_output_dir>/logs/
service_account (str):
Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
predefined_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
timestamp_split_column_name (str):
Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
timeout (int):
The maximum job running time in seconds. The default is 7 days.
restart_job_on_worker_restart (bool):
Restarts the entire CustomJob if a worker
gets restarted. This feature can be used by
distributed training jobs that are not resilient
to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
tensorboard (str):
Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
resource to which this CustomJob will upload Tensorboard
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
The training script should write Tensorboard logs to the following Vertex AI environment
variable:
AIP_TENSORBOARD_LOG_DIR
`service_account` is required with provided `tensorboard`.
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
reduction_server_container_uri (str):
Optional. The Uri of the reduction server container image.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
for spec_order, spec in enumerate(worker_pool_specs):
if not spec:
continue
if (
spec_order == worker_spec_utils._SPEC_ORDERS["server_spec"]
and reduction_server_container_uri
):
spec["container_spec"] = {
"image_uri": reduction_server_container_uri,
}
else:
spec["python_package_spec"] = {
"executor_image_uri": self._container_uri,
"python_module": self._python_module,
"package_uris": [self._package_gcs_uri],
}
if args:
spec["python_package_spec"]["args"] = args
if environment_variables:
spec["python_package_spec"]["env"] = [
{"name": key, "value": value}
for key, value in environment_variables.items()
]
(
training_task_inputs,
base_output_dir,
) = self._prepare_training_task_inputs_and_output_dir(
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
timeout=timeout,
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
)
model = self._run_job(
training_task_definition=schema.training_job.definition.custom_task,
training_task_inputs=training_task_inputs,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
model=managed_model,
gcs_destination_uri_prefix=base_output_dir,
bigquery_destination=bigquery_destination,
create_request_timeout=create_request_timeout,
)
return model
class AutoMLVideoTrainingJob(_TrainingJob):
_supported_training_schemas = (
schema.training_job.definition.automl_video_classification,
schema.training_job.definition.automl_video_object_tracking,
schema.training_job.definition.automl_video_action_recognition,
)
def __init__(
self,
display_name: str,
prediction_type: str = "classification",
model_type: str = "CLOUD",
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
):
"""Constructs a AutoML Video Training Job.
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
prediction_type (str):
The type of prediction the Model is to produce, one of:
"classification" - A video classification model classifies shots
and segments in your videos according to your own defined labels.
"object_tracking" - A video object tracking model detects and tracks
multiple objects in shots and segments. You can use these
models to track objects in your videos according to your
own pre-defined, custom labels.
"action_recognition" - A video action recognition model pinpoints
the location of actions with short temporal durations (~1 second).
model_type: str = "CLOUD"
Required. One of the following:
"CLOUD" - available for "classification", "object_tracking" and "action_recognition"
A Model best tailored to be used within Google Cloud,
and which cannot be exported.
"MOBILE_VERSATILE_1" - available for "classification", "object_tracking" and "action_recognition"
A model that, in addition to being available within Google
Cloud, can also be exported (see ModelService.ExportModel)
as a TensorFlow or TensorFlow Lite model and used on a
mobile or edge device afterwards.
"MOBILE_CORAL_VERSATILE_1" - available only for "object_tracking"
A versatile model that is meant to be exported (see
ModelService.ExportModel) and used on a Google Coral device.
"MOBILE_CORAL_LOW_LATENCY_1" - available only for "object_tracking"
A model that trades off quality for low latency, to be
exported (see ModelService.ExportModel) and used on a
Google Coral device.
"MOBILE_JETSON_VERSATILE_1" - available only for "object_tracking"
A versatile model that is meant to be exported (see
ModelService.ExportModel) and used on an NVIDIA Jetson device.
"MOBILE_JETSON_LOW_LATENCY_1" - available only for "object_tracking"
A model that trades off quality for low latency, to be
exported (see ModelService.ExportModel) and used on an
NVIDIA Jetson device.
project (str):
Optional. Project to run training in. Overrides project set in aiplatform.init.
location (str):
Optional. Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
Raises:
ValueError: When an invalid prediction_type and/or model_type is provided.
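Example:
    An illustrative sketch; the display name below is a placeholder:

    job = AutoMLVideoTrainingJob(
        display_name="my-video-action-job",
        prediction_type="action_recognition",
        model_type="CLOUD",
    )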
"""
valid_model_types = constants.AUTOML_VIDEO_PREDICTION_MODEL_TYPES.get(
prediction_type, None
)
if not valid_model_types:
raise ValueError(
f"'{prediction_type}' is not a supported prediction type for AutoML Video Training. "
f"Please choose one of: {tuple(constants.AUTOML_VIDEO_PREDICTION_MODEL_TYPES.keys())}."
)
if model_type not in valid_model_types:
raise ValueError(
f"'{model_type}' is not a supported model_type for prediction_type of '{prediction_type}'. "
f"Please choose one of: {tuple(valid_model_types)}"
)
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
self._model_type = model_type
self._prediction_type = prediction_type
def run(
self,
dataset: datasets.VideoDataset,
training_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the AutoML Video training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
``training_fraction_split`` and ``test_fraction_split`` may optionally
be provided, they must sum to up to 1. If none of the fractions are set,
by default roughly 80% of data will be used for training, and 20% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters, data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Args:
dataset (datasets.VideoDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For tabular Datasets, all their data is exported to
training, to pick and choose from.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model. The name
can be up to 128 characters long and can consist of any UTF-8
characters. If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
sync: bool = True
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
Raises:
RuntimeError: If Training job has already been run or is waiting to run.
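Example:
    An illustrative sketch; assumes `job` is an AutoMLVideoTrainingJob and
    `ds` is an existing datasets.VideoDataset:

    model = job.run(
        dataset=ds,
        training_fraction_split=0.8,
        test_fraction_split=0.2,
        model_display_name="my-video-model",
    )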
"""
if model_display_name:
utils.validate_display_name(model_display_name)
if model_labels:
utils.validate_labels(model_labels)
if self._is_waiting_to_run():
raise RuntimeError("AutoML Video Training is already scheduled to run.")
if self._has_run:
raise RuntimeError("AutoML Video Training has already run.")
return self._run(
dataset=dataset,
training_fraction_split=training_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
test_filter_split=test_filter_split,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
create_request_timeout=create_request_timeout,
)
@base.optional_sync()
def _run(
self,
dataset: datasets.VideoDataset,
training_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split`` and ``test_fraction_split`` may optionally
be provided, they must sum to up to 1. If none of the fractions are set,
by default roughly 80% of data will be used for training, and 20% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters, data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Args:
dataset (datasets.VideoDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For tabular Datasets, all their data is exported to
training, to pick and choose from.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model. The name
can be up to 128 characters long and can consist of any UTF-8
characters. If not provided upon creation, the job's display_name
is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
# Retrieve the objective-specific training task schema based on prediction_type
training_task_definition = getattr(
schema.training_job.definition, f"automl_video_{self._prediction_type}"
)
training_task_inputs_dict = {
"modelType": self._model_type,
}
# gca Model to be trained
model_tbt = gca_model.Model(encryption_spec=self._model_encryption_spec)
model_tbt.display_name = model_display_name or self._display_name
model_tbt.labels = model_labels or self._labels
# AutoMLVideo does not support validation, so pass in '-' if any other filter split is provided.
validation_filter_split = (
"-"
if all([training_filter_split is not None, test_filter_split is not None])
else None
)
return self._run_job(
training_task_definition=training_task_definition,
training_task_inputs=training_task_inputs_dict,
dataset=dataset,
training_fraction_split=training_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
model=model_tbt,
create_request_timeout=create_request_timeout,
)
@property
def _model_upload_fail_string(self) -> str:
"""Helper property for model upload failure."""
return (
f"AutoML Video Training Pipeline {self.resource_name} is not "
"configured to upload a Model."
)
class AutoMLTextTrainingJob(_TrainingJob):
_supported_training_schemas = (
schema.training_job.definition.automl_text_classification,
schema.training_job.definition.automl_text_extraction,
schema.training_job.definition.automl_text_sentiment,
)
def __init__(
self,
display_name: str,
prediction_type: str,
multi_label: bool = False,
sentiment_max: int = 10,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
):
"""Constructs a AutoML Text Training Job.
Args:
display_name (str):
Required. The user-defined name of this TrainingPipeline.
prediction_type (str):
The type of prediction the Model is to produce, one of:
"classification" - A classification model analyzes text data and
returns a list of categories that apply to the text found in the data.
Vertex AI offers both single-label and multi-label text classification models.
"extraction" - An entity extraction model inspects text data
for known entities referenced in the data and
labels those entities in the text.
"sentiment" - A sentiment analysis model inspects text data and identifies the
prevailing emotional opinion within it, especially to determine a writer's attitude
as positive, negative, or neutral.
multi_label (bool):
Required and only applicable for text classification task. If false, a single-label (multi-class) Model will be trained (i.e.
assuming that for each text snippet just up to one annotation may be
applicable). If true, a multi-label Model will be trained (i.e.
assuming that for each text snippet multiple annotations may be
applicable).
sentiment_max (int):
Required and only applicable for sentiment task. A sentiment is expressed as an integer
ordinal, where higher value means a more
positive sentiment. The range of sentiments that
will be used is between 0 and sentimentMax
(inclusive on both ends), and all the values in
the range must be represented in the dataset
before a model can be created.
Only the Annotations with this sentimentMax will
be used for training. sentimentMax value must be
between 1 and 10 (inclusive).
project (str):
Optional. Project to run training in. Overrides project set in aiplatform.init.
location (str):
Optional. Location to run training in. Overrides location set in aiplatform.init.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to run call training service. Overrides
credentials set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
training_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
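Example:
    An illustrative sketch; the display name below is a placeholder:

    job = AutoMLTextTrainingJob(
        display_name="my-text-classification-job",
        prediction_type="classification",
        multi_label=False,
    )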
"""
super().__init__(
display_name=display_name,
project=project,
location=location,
credentials=credentials,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
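# Resolve the training task schema and its typed inputs proto from the
# requested prediction_type; both are stored on the instance and reused
# later by _run_job.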
training_task_definition: str
training_task_inputs_dict: proto.Message
if prediction_type == "classification":
training_task_definition = (
schema.training_job.definition.automl_text_classification
)
training_task_inputs_dict = (
training_job_inputs.AutoMlTextClassificationInputs(
multi_label=multi_label
)
)
elif prediction_type == "extraction":
training_task_definition = (
schema.training_job.definition.automl_text_extraction
)
training_task_inputs_dict = training_job_inputs.AutoMlTextExtractionInputs()
elif prediction_type == "sentiment":
training_task_definition = (
schema.training_job.definition.automl_text_sentiment
)
training_task_inputs_dict = training_job_inputs.AutoMlTextSentimentInputs(
sentiment_max=sentiment_max
)
else:
raise ValueError(
"Prediction type must be one of 'classification', 'extraction', or 'sentiment'."
)
self._training_task_definition = training_task_definition
self._training_task_inputs_dict = training_task_inputs_dict
def run(
self,
dataset: datasets.TextDataset,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided, they must sum to up to 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters, data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Args:
dataset (datasets.TextDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model.
The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource.
Raises:
RuntimeError: If Training job has already been run or is waiting to run.
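Example:
    An illustrative sketch; assumes `job` is an AutoMLTextTrainingJob and
    `ds` is an existing datasets.TextDataset:

    model = job.run(
        dataset=ds,
        training_fraction_split=0.8,
        validation_fraction_split=0.1,
        test_fraction_split=0.1,
        model_display_name="my-text-model",
    )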
"""
if model_display_name:
utils.validate_display_name(model_display_name)
if model_labels:
utils.validate_labels(model_labels)
if self._is_waiting_to_run():
raise RuntimeError("AutoML Text Training is already scheduled to run.")
if self._has_run:
raise RuntimeError("AutoML Text Training has already run.")
return self._run(
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
create_request_timeout=create_request_timeout,
)
@base.optional_sync()
def _run(
self,
dataset: datasets.TextDataset,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
) -> models.Model:
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations:
Data fraction splits:
Any of ``training_fraction_split``, ``validation_fraction_split`` and
``test_fraction_split`` may optionally be provided, they must sum to up to 1. If
the provided ones sum to less than 1, the remainder is assigned to sets as
decided by Vertex AI. If none of the fractions are set, by default roughly 80%
of data will be used for training, 10% for validation, and 10% for test.
Data filter splits:
Assigns input data to training, validation, and test sets
based on the given filters, data pieces not matched by any
filter are ignored. Currently only supported for Datasets
containing DataItems.
If any of the filters in this message are to match nothing, then
they can be set as '-' (the minus sign).
If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and
``test_filter_split`` must be provided.
Supported only for unstructured Datasets.
Args:
dataset (datasets.TextDataset):
Required. The dataset within the same Project from which data will be used to train the Model. The
Dataset must use schema compatible with Model being trained,
and what is compatible should be described in the used
TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
For Text Datasets, all their data is exported to
training, to pick and choose from.
training_fraction_split (float):
Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
validation_fraction_split (float):
Optional. The fraction of the input data that is to be used to validate
the Model. This is ignored if Dataset is not provided.
test_fraction_split (float):
Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
training_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
validation_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
test_filter_split (str):
Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
model_display_name (str):
Optional. The display name of the managed Vertex AI Model. The name
can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
model_labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
Returns:
model: The trained Vertex AI Model resource or None if training did not
produce a Vertex AI Model.
"""
model = gca_model.Model(
display_name=model_display_name or self._display_name,
labels=model_labels or self._labels,
encryption_spec=self._model_encryption_spec,
)
return self._run_job(
training_task_definition=self._training_task_definition,
training_task_inputs=self._training_task_inputs_dict,
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
model=model,
create_request_timeout=create_request_timeout,
)
@property
def _model_upload_fail_string(self) -> str:
"""Helper property for model upload failure."""
return (
f"AutoML Text Training Pipeline {self.resource_name} is not "
"configured to upload a Model."
)
| 52.135315
| 159
| 0.629172
|
f74ae93b8cc6518cf4988a26caf259e0bc802844
| 564
|
py
|
Python
|
drawer.py
|
janakhpon/LearnPyDacity
|
b096f9a50496a0c01353bf953209e766de312187
|
[
"MIT"
] | null | null | null |
drawer.py
|
janakhpon/LearnPyDacity
|
b096f9a50496a0c01353bf953209e766de312187
|
[
"MIT"
] | null | null | null |
drawer.py
|
janakhpon/LearnPyDacity
|
b096f9a50496a0c01353bf953209e766de312187
|
[
"MIT"
] | null | null | null |
import turtle


def draw_square(some_turtle):
    # Draw one 100x100 square with the given turtle.
    for i in range(1, 5):
        some_turtle.forward(100)
        some_turtle.right(90)


def draw_art():
    window = turtle.Screen()
    window.bgcolor("blue")
    img_tt = turtle.Turtle()
    img_tt.shape("turtle")
    img_tt.color("white")
    img_tt.speed(2)
    # Rotate 10 degrees between squares: 36 squares complete a full circle.
    for i in range(1, 37):
        draw_square(img_tt)
        img_tt.right(10)
    """
    draw_square(img_tt)
    angle = turtle.Turtle()
    angle.shape("arrow")
    angle.color("black")
    angle.circle(100)
    """
    # Keep the window open until it is clicked.
    window.exitonclick()


draw_art()
| 20.142857
| 32
| 0.611702
|
ab7fe3fb1274ec045d52c3cecb2c6b61403eef0b
| 2,368
|
py
|
Python
|
battements/sinusoide_effet_des_parametres.py
|
mabuchet/cours_terminale
|
7bb8dcacbc2b9a305ff6a7f04aea80f7f2bbc690
|
[
"MIT"
] | null | null | null |
battements/sinusoide_effet_des_parametres.py
|
mabuchet/cours_terminale
|
7bb8dcacbc2b9a305ff6a7f04aea80f7f2bbc690
|
[
"MIT"
] | null | null | null |
battements/sinusoide_effet_des_parametres.py
|
mabuchet/cours_terminale
|
7bb8dcacbc2b9a305ff6a7f04aea80f7f2bbc690
|
[
"MIT"
] | null | null | null |
"""
Auteur : Marc-Antoine BUCHET
Date : 31/03/2021
BUT :
Tracer la représentation graphique d'une sinusoïde et voir l'effet des
différents paramètres sur le signal.
"""
# Imports de bibliothèques :
import numpy as np
import matplotlib.pyplot as plt
#==============================================================================
## pyplot configuration:
#==============================================================================
# Bold, larger tick labels:
font = {'family' : 'sans',
'weight' : 'bold',
'size' : 12}
plt.rc('font', **font)
# Enable interactive mode:
plt.ion()
#==============================================================================
## Numerical parameters:
#==============================================================================
# Number of points for the plots:
N = 1000
#==============================================================================
## Physical parameters:
#==============================================================================
# Reference signal:
a_ref = 1. # a.u., amplitude
f_ref = 1. # Hz, frequency
phi_ref = 0. # rad, initial phase
omega_ref = 2*np.pi*f_ref # angular frequency
T_ref = 1/f_ref # period
# Signal under study:
a = 1. # a.u., amplitude
f = 1. # Hz, frequency
phi = 0. # rad, initial phase
omega = 2*np.pi*f # angular frequency
T = 1/f # period
#==============================================================================
## Quantities to plot:
#==============================================================================
# Compute the x-axis values (here, the times at which to evaluate s(t)):
t_min = -1.*T_ref # In numbers of periods, which is the most relevant choice
t_max = 2.*T_ref
t = np.linspace(t_min,t_max,N)
# And the y-axis values:
s_ref = a_ref*np.cos(omega_ref*t+phi_ref)
s = a*np.cos(omega*t+phi)
#==============================================================================
## Plotting:
#==============================================================================
nom_de_la_figure = "sinusoide_effet_des_parametres"
fig = plt.figure(nom_de_la_figure)
plt.plot(t,s,label = "Signal")
plt.plot(t,s_ref,'--',label = "Référence")
plt.xlabel('t en s',fontweight = 'bold')
plt.ylabel('s(t) en u.a.',fontweight = 'bold')
plt.grid()
plt.legend()
plt.tight_layout()
fig.savefig(nom_de_la_figure+'.pdf')
| 30.358974
| 80
| 0.454814
|
c79b608aa45609979a8d6a181bce06ee0a5f8b11
| 2,367
|
py
|
Python
|
fulljson/collections.py
|
15045120/FullJSON
|
de4e6fb0e125182bee7619b1bc58ee8c33f8b7c7
|
[
"MIT"
] | 1
|
2020-01-11T18:19:44.000Z
|
2020-01-11T18:19:44.000Z
|
fulljson/collections.py
|
15045120/FullJSON
|
de4e6fb0e125182bee7619b1bc58ee8c33f8b7c7
|
[
"MIT"
] | null | null | null |
fulljson/collections.py
|
15045120/FullJSON
|
de4e6fb0e125182bee7619b1bc58ee8c33f8b7c7
|
[
"MIT"
] | 1
|
2021-04-17T18:49:57.000Z
|
2021-04-17T18:49:57.000Z
|
# -*- coding: UTF-8 -*-
class StackEmptyError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class Stack(object):
def __init__(self):
self.__values = []
self.__index = -1
self.top = None
def push(self, value):
self.__values.append(value)
self.__index = self.__index + 1
self.top = self.__values[-1]
def pop(self):
if self.__index == -1:
raise StackEmptyError('Stack is empty')
value = self.__values.pop()
self.__index = self.__index - 1
if self.__index == -1:
self.top = None
else:
self.top = self.__values[self.__index]
return value
def is_empty(self):
return self.__index == -1
def pop_all(self, end=None):
list = []
while not self.is_empty() and self.top != end:
value = self.pop()
list.insert(0, value)
return list
def print_stack(self):
if self.__index == -1:
print('<>')
else:
__values_str = str(self.__values)
print('<{}>'.format(__values_str[1:len(__values_str)-1]))
def size(self):
return len(self.__values)
class QueueEmptyError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class Queue(object):
def __init__(self):
self.__values = []
def in_queue(self, value):
self.__values.append(value)
def out_queue(self):
if len(self.__values) == 0:
raise QueueEmptyError('Queue is empty')
self.__values.pop(0)
def is_empty(self):
return len(self.__values) == 0
def queue_head(self):
if len(self.__values) == 0:
raise QueueEmptyError('Queue is empty')
return self.__values[0]
def queue_tail(self):
if len(self.__values) == 0:
raise QueueEmptyError('Queue is empty')
return self.__values[-1]
def print_queue(self):
if len(self.__values) == 0:
print('<>')
else:
__values_str = str(self.__values)
print('<{}>'.format(__values_str[1:len(__values_str)-1]))
def size(self):
return len(self.__values)
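# Illustrative usage sketch (not part of the original module): exercises the
# Stack and Queue classes above; all names below are local to this demo.
if __name__ == '__main__':
    s = Stack()
    s.push(1)
    s.push(2)
    assert s.top == 2          # top tracks the last pushed value
    assert s.pop() == 2        # pop returns values in LIFO order
    assert s.pop_all() == [1]  # pop_all drains the stack, oldest first
    assert s.is_empty()

    q = Queue()
    q.in_queue('a')
    q.in_queue('b')
    assert q.queue_head() == 'a'
    assert q.queue_tail() == 'b'
    q.out_queue()              # removes the head element ('a')
    assert q.queue_head() == 'b'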
| 26.3
| 69
| 0.55978
|
309c38396ea7459be624995852928acc3fe56fcc
| 3,308
|
py
|
Python
|
examples/turtlebot-master/goforward_and_avoid_obstacle.py
|
LiuSeeker/Robotica-projeto-1
|
425795d51232470ac840faf9dc7d97863d801554
|
[
"CECILL-B"
] | 1
|
2019-07-14T21:27:21.000Z
|
2019-07-14T21:27:21.000Z
|
examples/turtlebot-master/goforward_and_avoid_obstacle.py
|
LiuSeeker/Robotica-projeto-1
|
425795d51232470ac840faf9dc7d97863d801554
|
[
"CECILL-B"
] | null | null | null |
examples/turtlebot-master/goforward_and_avoid_obstacle.py
|
LiuSeeker/Robotica-projeto-1
|
425795d51232470ac840faf9dc7d97863d801554
|
[
"CECILL-B"
] | null | null | null |
#!/usr/bin/env python
'''
Copyright (c) 2015, Mark Silliman
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#Code is inspired by http://wiki.ros.org/navigation/Tutorials/SendingSimpleGoals (written in C++).
#TurtleBot must have minimal.launch & amcl_demo.launch running prior to starting this script.
import rospy
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import actionlib
from actionlib_msgs.msg import *
class GoForwardAvoid():
def __init__(self):
rospy.init_node('nav_test', anonymous=False)
#what to do if shut down (e.g. ctrl + C or failure)
rospy.on_shutdown(self.shutdown)
#tell the action client that we want to spin a thread by default
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("wait for the action server to come up")
#allow up to 5 seconds for the action server to come up
self.move_base.wait_for_server(rospy.Duration(5))
#we'll send a goal to the robot to move 3 meters forward
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = 'base_link'
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = 3.0 #3 meters
goal.target_pose.pose.orientation.w = 1.0 #go forward
#start moving
self.move_base.send_goal(goal)
#allow TurtleBot up to 60 seconds to complete task
success = self.move_base.wait_for_result(rospy.Duration(60))
if not success:
self.move_base.cancel_goal()
rospy.loginfo("The base failed to move forward 3 meters for some reason")
else:
# We made it!
state = self.move_base.get_state()
if state == GoalStatus.SUCCEEDED:
rospy.loginfo("Hooray, the base moved 3 meters forward")
def shutdown(self):
rospy.loginfo("Stop")
if __name__ == '__main__':
try:
GoForwardAvoid()
except rospy.ROSInterruptException:
rospy.loginfo("Exception thrown")
| 44.106667
| 755
| 0.765115
|
c8b35aca1b223fc9978906fa7ad5d6457caed1ef
| 9,707
|
py
|
Python
|
test/test_client.py
|
pkuyouth/pkuyouth-updater-v2
|
779d11e99bf1847422a77fc69b58980d74346e33
|
[
"MIT"
] | null | null | null |
test/test_client.py
|
pkuyouth/pkuyouth-updater-v2
|
779d11e99bf1847422a77fc69b58980d74346e33
|
[
"MIT"
] | null | null | null |
test/test_client.py
|
pkuyouth/pkuyouth-updater-v2
|
779d11e99bf1847422a77fc69b58980d74346e33
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------
# Project: PKUYouth Webserver v2
# File: test_client.py
# Created Date: 2020-08-03
# Author: Xinghong Zhong
# ---------------------------------------
# Copyright (c) 2020 PKUYouth
import sys
sys.path.append('../')
import os
import re
import time
import pickle
import gzip
from pprint import pprint
from io import BytesIO
from PIL import Image
import pymysql
from lxml import etree
from urllib.parse import urlparse, parse_qsl
from updater.config import UpdaterConfig
from updater.client import MPWXClient, QiniuClient
from updater.image import compress_sm_cover, compress_bg_cover
from updater.utils import jgz_dump, jgz_load, mkdir
from updater.const import CACHE_DIR
from updater.debug import print_request, print_response, dump_response_content,\
print_set_cookies, print_client_cookies, download_static, get_image_size
ADLIST_JSON = os.path.join(CACHE_DIR, "adlist.json.gz")
ADCLIST_JSON = os.path.join(CACHE_DIR, "adclist.json.gz")
IMAGE_CACHE_DIR = os.path.join(CACHE_DIR, "image/")
mkdir(IMAGE_CACHE_DIR)
def test_download_static():
download_static("https://res.wx.qq.com/mpres/zh_CN/htmledition/pages/login/loginscan/loginscan4f932d.js")
download_static("https://res.wx.qq.com/mpres/zh_CN/htmledition/3rd/tajs/tajs492dbc.js")
download_static("https://mp.weixin.qq.com/s?__biz=MzA3NzAzMDEyNg==&mid=200397842&idx=1&sn=959d94ba5a4ff29b6e06a060fc774cf5#rd", "200397842_1.html")
download_static("https://mp.weixin.qq.com/s?__biz=MzA3NzAzMDEyNg==&mid=2650833181&idx=1&sn=f13ff0050b9d77784ae1f96d6ff040f0#rd", "2650833181_1.html")
def test_login(client):
if client.logined:
return
r = client.homepage()
r = client.bizlogin_prelogin()
r = client.bizlogin_startlogin()
r = client.bizlogin_validate()
r = client.loginqrcode_ask()
r = client.loginqrcode_getqrcode()
dump_response_content(r, "loginqrcode.jpg")
buf = BytesIO(r.content)
im = Image.open(buf)
im.show()
current_status = -1
while current_status != 1:
r = client.loginqrcode_ask()
rjson = r.json()
status = rjson['status']
if status == 0:
if current_status != 0:
print("等待扫码")
current_status = status
elif status == 4:
if current_status != 4:
print("等待确认")
current_status = status
elif status == 1:
if current_status != 1:
print("确认登录")
current_status = status
elif status == 2:
raise Exception("管理员已拒绝你的操作申请")
elif status == 3:
raise Exception("操作申请已过期")
else:
pprint(rjson)
print("Unknown Status !")
time.sleep(1.5)
r = client.bizlogin_login()
client.dump_session()
def test_download_articles_list(client):
count = 7
begin = 0
total = -1
adlist = []
while total == -1 or begin <= total:
print("GET newmasssendpage %d/%d" % (begin, total))
r = client.newmasssendpage(count, begin)
rjson = r.json()
total = rjson['total_count']
slist = rjson['sent_list']
for msg in slist:
if msg['type'] != 9:
continue
masssend_time = msg['sent_info']['time']
for m in msg['appmsg_info']:
if m['is_deleted']:
continue
if 'comment_id' not in m and 'copyright_type' not in m:
continue
ad = {
"appmsgid": "{:0>10d}".format(m['appmsgid']),
"title": m['title'],
"cover_url": m['cover'],
"content_url": m['content_url'],
"like_num": m['like_num'],
"read_num": m['read_num'],
"masssend_time": masssend_time,
}
for k, v in parse_qsl(urlparse(m['content_url']).query):
if k in ("idx", "itemidx"):
ad['idx'] = v
if k in ("sn", "sign"):
ad['sn'] = v
assert 'idx' in ad and 'sn' in ad
adlist.append(ad)
begin += count
jgz_dump(adlist, ADLIST_JSON)
client.dump_session()
def test_download_article_content(client, conn):
adlist = jgz_load(ADLIST_JSON)
lastid = '9999999999'
for ad in adlist:
appmsgid = ad['appmsgid']
assert appmsgid <= lastid, (appmsgid, lastid)
lastid = appmsgid
sql = 'SELECT MAX(`appmsgid`) FROM `article` WHERE LENGTH(`appmsgid`) = 10'
with conn.cursor() as cur:
cur.execute(sql)
max_appmsgid = cur.fetchone()[0].zfill(10)
adclist = []
for ad in adlist:
appmsgid = ad['appmsgid']
idx = ad['idx']
if appmsgid <= max_appmsgid:
break
print("GET article_content (%s, %s)" % (appmsgid, idx))
url = ad['content_url']
r = client.article_content(url)
tree = etree.HTML(r.content)
digest = tree.xpath('//head/meta[@name="description"]/@content')
if len(digest) == 0:
digest = None
content = None
else:
digest = digest[0]
content = tree.xpath('//div[@id="js_content"]//text()')
content = ' '.join(s.strip() for s in content if len(s) > 0 and not s.isspace())
adclist.append({
'digest': digest,
'content': content,
**ad,
})
jgz_dump(adclist, ADCLIST_JSON)
def test_update_database(conn):
cur = conn.cursor()
adlist = jgz_load(ADLIST_JSON)
adclist = jgz_load(ADCLIST_JSON)
admap = { (ad['appmsgid'], ad['idx']): ad for ad in adlist }
for ad in adclist:
appmsgid = ad['appmsgid']
idx = ad['idx']
key = (appmsgid, idx)
admap.pop(key)
if ad['digest'] is None and ad['content'] is None:
continue
appmsgid = appmsgid.lstrip('0')
idx = int(idx)
sn = ad['sn']
title = ad['title']
digest = ad['digest']
content = ad['content']
cover_url = ad['cover_url']
content_url = ad['content_url']
like_num = ad['like_num']
read_num = ad['read_num']
masssend_time = ad['masssend_time']
sql = (
'INSERT INTO `article` '
'(`appmsgid`,`idx`,`sn`,`title`,`digest`,`content`,`cover_url`,'
' `content_url`,`like_num`,`read_num`,`masssend_time`) '
'VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
)
data = (appmsgid, idx, sn, title, digest, content, cover_url,
content_url, like_num, read_num, masssend_time)
cur.execute(sql, data)
for ad in admap.values():
appmsgid = ad['appmsgid'].lstrip('0')
idx = int(ad['idx'])
like_num = ad['like_num']
read_num = ad['read_num']
sql = (
'UPDATE `article` '
'SET `like_num` = %s,'
' `read_num` = %s '
'WHERE `appmsgid` = %s AND `idx` = %s '
)
data = (like_num, read_num, appmsgid, idx)
cur.execute(sql, data)
cur.close()
conn.commit()
def test_update_static(client, qclient):
adclist = jgz_load(ADCLIST_JSON)
for ad in adclist:
if ad['digest'] is None and ad['content'] is None:
continue
url = ad['cover_url']
r = client.article_cover(url)
buf = BytesIO(r.content)
im = Image.open(buf).convert('RGB')
sim = compress_sm_cover(im)
bim = compress_bg_cover(im)
kwargs = {
'quality': 60
}
osz = get_image_size(im, 'jpeg', **kwargs)
ssz = get_image_size(sim, 'jpeg', **kwargs)
bsz = get_image_size(bim, 'jpeg', **kwargs)
print(osz, ssz, bsz)
key = "%s%s" % (ad['appmsgid'], ad['idx'])
assert len(key) == 11
im.save(os.path.join(IMAGE_CACHE_DIR, "%s.im.jpeg" % key), **kwargs)
sim.save(os.path.join(IMAGE_CACHE_DIR, "%s.sim.jpeg" % key), **kwargs)
bim.save(os.path.join(IMAGE_CACHE_DIR, "%s.bim.jpeg" % key), **kwargs)
smbuf = BytesIO()
sim.save(smbuf, format='jpeg', quality=50)
smdata = smbuf.getvalue()
smkey = "pkuyouth/sm_cover/%s.jpeg" % key
bgbuf = BytesIO()
bim.save(bgbuf, format='jpeg', quality=60)
bgdata = bgbuf.getvalue()
bgkey = "pkuyouth/bg_cover/%s.jpeg" % key
qclient.put_data(smdata, smkey)
qclient.put_data(bgdata, bgkey)
def main():
config = UpdaterConfig()
client = MPWXClient(
username=config.mpwx_username,
password=config.mpwx_password,
)
qclient = QiniuClient(
access_key=config.qiniu_access_key,
secret_key=config.qiniu_secret_key,
bucket=config.qiniu_bucket,
)
conn = pymysql.connect(
host=config.mysql_host,
port=config.mysql_port,
user=config.mysql_user,
password=config.mysql_password,
db=config.mysql_database,
charset=config.mysql_charset,
)
try:
# test_login(client)
# test_download_articles_list(client)
# test_download_article_content(client, conn)
# test_update_database(conn)
# test_update_static(client, qclient)
pass
except:
conn.rollback()
raise
finally:
conn.close()
client.close()
if __name__ == "__main__":
# test_download_static()
main()
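# --- Hedged sketch (not part of the original test file) ---
# jgz_dump/jgz_load come from updater.utils, which is not shown here. Judging
# only by the ".json.gz" cache filenames above, they are presumably thin
# wrappers around json + gzip, roughly like the sketch below; the real
# implementation may well differ.
import gzip as _gzip
import json as _json

def _sketch_jgz_dump(obj, path):
    # write a gzip-compressed JSON file
    with _gzip.open(path, 'wt', encoding='utf-8') as fp:
        _json.dump(obj, fp, ensure_ascii=False)

def _sketch_jgz_load(path):
    # read a gzip-compressed JSON file back into Python objects
    with _gzip.open(path, 'rt', encoding='utf-8') as fp:
        return _json.load(fp)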
| 26.235135
| 153
| 0.563923
|
20099bc28e7c7e4c286dba97ab68334c0f2b66f6
| 4,863
|
py
|
Python
|
test/functional/wallet_importprunedfunds.py
|
Crowntium/crowntium
|
78c17071dbb957d7799d2508448413bdbea09c2d
|
[
"MIT"
] | null | null | null |
test/functional/wallet_importprunedfunds.py
|
Crowntium/crowntium
|
78c17071dbb957d7799d2508448413bdbea09c2d
|
[
"MIT"
] | null | null | null |
test/functional/wallet_importprunedfunds.py
|
Crowntium/crowntium
|
78c17071dbb957d7799d2508448413bdbea09c2d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importprunedfunds and removeprunedfunds RPCs."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.crtconfig import *
from test_framework.crt import generatesynchronized
class ImportPrunedFundsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
generatesynchronized(self.nodes[0], COINBASE_MATURITY+1, None, self.nodes)
self.sync_all()
# address
address1 = self.nodes[0].getnewaddress()
# pubkey
address2 = self.nodes[0].getnewaddress()
# privkey
address3 = self.nodes[0].getnewaddress()
address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey
# Check only one address
address_info = self.nodes[0].getaddressinfo(address1)
assert_equal(address_info['ismine'], True)
self.sync_all()
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1+COINBASE_MATURITY)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address2)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# Send funds to self
txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
self.nodes[0].generate(1)
rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
proof1 = self.nodes[0].gettxoutproof([txnid1])
txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
self.nodes[0].generate(1)
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
proof2 = self.nodes[0].gettxoutproof([txnid2])
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
self.nodes[0].generate(1)
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
proof3 = self.nodes[0].gettxoutproof([txnid3])
self.sync_all()
# Import with no affiliated address
assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
balance1 = self.nodes[1].getbalance()
assert_equal(balance1, Decimal(0))
# Import with affiliated address with no rescan
self.nodes[1].importaddress(address=address2, rescan=False)
self.nodes[1].importprunedfunds(rawtransaction=rawtxn2, txoutproof=proof2)
assert [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid2]
# Import with private key with no rescan
self.nodes[1].importprivkey(privkey=address3_privkey, rescan=False)
self.nodes[1].importprunedfunds(rawtxn3, proof3)
assert [tx for tx in self.nodes[1].listtransactions() if tx['txid'] == txnid3]
balance3 = self.nodes[1].getbalance()
assert_equal(balance3, Decimal('0.025'))
# Addresses Test - after import
address_info = self.nodes[1].getaddressinfo(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address2)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
# Remove transactions
assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid1]
self.nodes[1].removeprunedfunds(txnid2)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid2]
self.nodes[1].removeprunedfunds(txnid3)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid3]
if __name__ == '__main__':
ImportPrunedFundsTest().main()
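# --- Hedged sketch (not part of the original test) ---
# Outside the test framework, the same flow is three plain RPC calls: fetch the
# raw transaction and its merkle proof from a node that still has the block
# data, then hand both to importprunedfunds on the pruned wallet. The snippet
# below uses python-bitcoinrpc's AuthServiceProxy purely as an illustration;
# the URLs and txid are placeholders, and the watch-only address/key must
# already have been imported with rescan=False as in the test above.
from bitcoinrpc.authproxy import AuthServiceProxy

def _sketch_import_pruned(full_node_url, pruned_node_url, txid):
    full_node = AuthServiceProxy(full_node_url)
    pruned_node = AuthServiceProxy(pruned_node_url)
    rawtx = full_node.gettransaction(txid)['hex']   # wallet tx on the sending node
    proof = full_node.gettxoutproof([txid])         # merkle proof for its block
    pruned_node.importprunedfunds(rawtx, proof)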
| 40.865546
| 117
| 0.684557
|
bb423a2d86772ee4b9f05382c1b222329578a7ff
| 6,266
|
py
|
Python
|
tests/lib/server.py
|
j420247/pip
|
33cd93cac86690134e8b4874ac42c4e0ce8770d2
|
[
"MIT"
] | 1
|
2019-12-20T05:27:25.000Z
|
2019-12-20T05:27:25.000Z
|
tests/lib/server.py
|
j420247/pip
|
33cd93cac86690134e8b4874ac42c4e0ce8770d2
|
[
"MIT"
] | 7
|
2019-12-27T07:56:50.000Z
|
2022-01-25T03:41:39.000Z
|
tests/lib/server.py
|
j420247/pip
|
33cd93cac86690134e8b4874ac42c4e0ce8770d2
|
[
"MIT"
] | 1
|
2020-02-14T16:53:19.000Z
|
2020-02-14T16:53:19.000Z
|
import os
import signal
import ssl
import threading
from contextlib import contextmanager
from textwrap import dedent
from mock import Mock
from pip._vendor.contextlib2 import nullcontext
from werkzeug.serving import WSGIRequestHandler
from werkzeug.serving import make_server as _make_server
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from types import TracebackType
from typing import (
Any, Callable, Dict, Iterable, List, Optional, Text, Tuple, Type, Union
)
from werkzeug.serving import BaseWSGIServer
Environ = Dict[str, str]
Status = str
Headers = Iterable[Tuple[str, str]]
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
Write = Callable[[bytes], None]
StartResponse = Callable[[Status, Headers, Optional[ExcInfo]], Write]
Body = List[bytes]
Responder = Callable[[Environ, StartResponse], Body]
class MockServer(BaseWSGIServer):
mock = Mock() # type: Mock
# Applies on Python 2 and Windows.
if not hasattr(signal, "pthread_sigmask"):
# We're not relying on this behavior anywhere currently, it's just best
# practice.
blocked_signals = nullcontext
else:
@contextmanager
def blocked_signals():
"""Block all signals for e.g. starting a worker thread.
"""
old_mask = signal.pthread_sigmask(
signal.SIG_SETMASK, range(1, signal.NSIG)
)
try:
yield
finally:
signal.pthread_sigmask(signal.SIG_SETMASK, old_mask)
class _RequestHandler(WSGIRequestHandler):
def make_environ(self):
environ = super(_RequestHandler, self).make_environ()
# From pallets/werkzeug#1469, will probably be in release after
# 0.16.0.
try:
# binary_form=False gives nicer information, but wouldn't be
# compatible with what Nginx or Apache could return.
peer_cert = self.connection.getpeercert(binary_form=True)
if peer_cert is not None:
# Nginx and Apache use PEM format.
environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(
peer_cert,
)
except ValueError:
# SSL handshake hasn't finished.
self.server.log("error", "Cannot fetch SSL peer certificate info")
except AttributeError:
# Not using TLS, the socket will not have getpeercert().
pass
return environ
def _mock_wsgi_adapter(mock):
# type: (Callable[[Environ, StartResponse], Responder]) -> Responder
"""Uses a mock to record function arguments and provide
the actual function that should respond.
"""
def adapter(environ, start_response):
# type: (Environ, StartResponse) -> Body
responder = mock(environ, start_response)
return responder(environ, start_response)
return adapter
def make_mock_server(**kwargs):
# type: (Any) -> MockServer
"""Creates a mock HTTP(S) server listening on a random port on localhost.
The `mock` property of the returned server provides and records all WSGI
interactions, so one approach to testing could be
server = make_mock_server()
server.mock.side_effect = [
page1,
page2,
]
with server_running(server):
# ... use server...
...
assert server.mock.call_count > 0
call_args_list = server.mock.call_args_list
# `environ` is a dictionary defined as per PEP 3333 with the associated
# contents. Additional properties may be added by werkzeug.
environ, _ = call_args_list[0].args
assert environ["PATH_INFO"].startswith("/hello/simple")
Note that the server interactions take place in a different thread, so you
do not want to touch the server.mock within the `server_running` block.
Note also for pip interactions that "localhost" is a "secure origin", so
be careful using this for failure tests of `--trusted-host`.
"""
kwargs.setdefault("request_handler", _RequestHandler)
mock = Mock()
app = _mock_wsgi_adapter(mock)
server = _make_server("localhost", 0, app=app, **kwargs)
server.mock = mock
return server
@contextmanager
def server_running(server):
# type: (BaseWSGIServer) -> None
"""Context manager for running the provided server in a separate thread.
"""
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
with blocked_signals():
thread.start()
try:
yield
finally:
server.shutdown()
thread.join()
# Helper functions for making responses in a declarative way.
def text_html_response(text):
# type: (Text) -> Responder
def responder(environ, start_response):
# type: (Environ, StartResponse) -> Body
start_response("200 OK", [
("Content-Type", "text/html; charset=UTF-8"),
])
return [text.encode('utf-8')]
return responder
def html5_page(text):
# type: (Union[Text, str]) -> Text
return dedent(u"""
<!DOCTYPE html>
<html>
<body>
{}
</body>
</html>
""").strip().format(text)
def index_page(spec):
# type: (Dict[str, str]) -> Responder
def link(name, value):
return '<a href="{}">{}</a>'.format(
value, name
)
links = ''.join(link(*kv) for kv in spec.items())
return text_html_response(html5_page(links))
def package_page(spec):
# type: (Dict[str, str]) -> Responder
def link(name, value):
return '<a href="{}">{}</a>'.format(
value, name
)
links = ''.join(link(*kv) for kv in spec.items())
return text_html_response(html5_page(links))
def file_response(path):
# type: (str) -> Responder
def responder(environ, start_response):
# type: (Environ, StartResponse) -> Body
size = os.stat(path).st_size
start_response(
"200 OK", [
("Content-Type", "application/octet-stream"),
("Content-Length", str(size)),
],
)
with open(path, 'rb') as f:
return [f.read()]
return responder
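# --- Hedged sketch (not part of the original module) ---
# A minimal end-to-end use of the helpers above, close to the example in the
# make_mock_server() docstring but written out as runnable code. It assumes
# the `requests` library is importable in the test environment; the page body
# and URL path are placeholders.
def _example_mock_server_usage():
    import requests

    server = make_mock_server()
    server.mock.side_effect = [
        text_html_response(html5_page(u"hello")),
    ]
    base_url = "http://localhost:{}".format(server.server_port)
    with server_running(server):
        resp = requests.get(base_url + "/hello/simple/")
    assert resp.status_code == 200
    # call_args unpacks to (positional args, keyword args); environ is the
    # first positional argument passed to the recorded WSGI call.
    args, _kwargs = server.mock.call_args
    environ = args[0]
    assert environ["PATH_INFO"].startswith("/hello/simple")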
| 29.41784
| 79
| 0.63262
|
08ef496b3d7ebf85fec2d743cdf2f72b128e1e55
| 13,677
|
py
|
Python
|
src/oscar/apps/dashboard/views.py
|
guidoaaroni/arandu
|
e1553b21516f38fd2fb10cf65204541efd3c8b54
|
[
"BSD-3-Clause"
] | 3
|
2020-03-30T13:11:57.000Z
|
2020-04-22T13:55:31.000Z
|
src/oscar/apps/dashboard/views.py
|
guidoaaroni/arandu
|
e1553b21516f38fd2fb10cf65204541efd3c8b54
|
[
"BSD-3-Clause"
] | 9
|
2020-10-29T08:03:28.000Z
|
2021-09-08T01:21:10.000Z
|
src/oscar/apps/dashboard/views.py
|
guidoaaroni/arandu
|
e1553b21516f38fd2fb10cf65204541efd3c8b54
|
[
"BSD-3-Clause"
] | 2
|
2021-01-06T19:25:07.000Z
|
2021-05-14T02:00:19.000Z
|
import json
from datetime import timedelta
from decimal import Decimal as D
from decimal import ROUND_UP
from django.db.models import Avg, Count, Sum
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.timezone import now
from django.views.generic import TemplateView
from oscar.apps.promotions.models import AbstractPromotion
from oscar.core.compat import get_user_model
from oscar.core.loading import get_class, get_model
RelatedFieldWidgetWrapper = get_class('dashboard.widgets', 'RelatedFieldWidgetWrapper')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
Basket = get_model('basket', 'Basket')
StockAlert = get_model('partner', 'StockAlert')
Product = get_model('catalogue', 'Product')
Order = get_model('order', 'Order')
Line = get_model('order', 'Line')
User = get_user_model()
class IndexView(TemplateView):
"""
An overview view which displays several reports about the shop.
Supports the permission-based dashboard. It is recommended to add an
index_nonstaff.html template because Oscar's default template will
display potentially sensitive store information.
"""
def get_template_names(self):
if self.request.user.is_staff:
return ['dashboard/index.html', ]
else:
return ['dashboard/index_nonstaff.html', 'dashboard/index.html']
def get_context_data(self, **kwargs):
ctx = super(IndexView, self).get_context_data(**kwargs)
ctx.update(self.get_stats())
return ctx
def get_active_site_offers(self):
"""
Return active conditional offers of type "site offer". The returned
``Queryset`` of site offers is filtered by end date greater than
the current date.
"""
return ConditionalOffer.objects.filter(
end_datetime__gt=now(), offer_type=ConditionalOffer.SITE)
def get_active_vouchers(self):
"""
Get all active vouchers. The returned ``Queryset`` of vouchers
is filtered by end date greater than the current date.
"""
return Voucher.objects.filter(end_datetime__gt=now())
def get_number_of_promotions(self, abstract_base=AbstractPromotion):
"""
Get the number of promotions for all promotions derived from
*abstract_base*. All subclasses of *abstract_base* are queried
and if another abstract base class is found this method is executed
recursively.
"""
total = 0
for cls in abstract_base.__subclasses__():
if cls._meta.abstract:
total += self.get_number_of_promotions(cls)
else:
total += cls.objects.count()
return total
def get_open_baskets(self, filters=None):
"""
Get all open baskets. If *filters* dictionary is provided they will
be applied on all open baskets and return only filtered results.
"""
if filters is None:
filters = {}
filters['status'] = Basket.OPEN
return Basket.objects.filter(**filters)
def get_hourly_report(self, hours=24, segments=10):
"""
Get report of order revenue split up in hourly chunks. A report is
generated for the last *hours* (default=24) from the current time.
The report provides ``max_revenue`` of the hourly order revenue sum,
``y-range`` as the labeling for the y-axis in a template and
``order_total_hourly``, a list of properties for hourly chunks.
*segments* defines the number of labeling segments used for the y-axis
when generating the y-axis labels (default=10).
"""
# Get datetime for 24 hours ago
time_now = now().replace(minute=0, second=0)
start_time = time_now - timedelta(hours=hours - 1)
orders_last_day = Order.objects.filter(date_placed__gt=start_time)
order_total_hourly = []
for hour in range(0, hours, 2):
end_time = start_time + timedelta(hours=2)
hourly_orders = orders_last_day.filter(date_placed__gt=start_time,
date_placed__lt=end_time)
total = hourly_orders.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.0')
order_total_hourly.append({
'end_time': end_time,
'total_incl_tax': total
})
start_time = end_time
max_value = max([x['total_incl_tax'] for x in order_total_hourly])
divisor = 1
while divisor < max_value / 50:
divisor *= 10
max_value = (max_value / divisor).quantize(D('1'), rounding=ROUND_UP)
max_value *= divisor
if max_value:
segment_size = (max_value) / D('100.0')
for item in order_total_hourly:
item['percentage'] = int(item['total_incl_tax'] / segment_size)
y_range = []
y_axis_steps = max_value / D(str(segments))
for idx in reversed(range(segments + 1)):
y_range.append(idx * y_axis_steps)
else:
y_range = []
for item in order_total_hourly:
item['percentage'] = 0
ctx = {
'order_total_hourly': order_total_hourly,
'max_revenue': max_value,
'y_range': y_range,
}
return ctx
def get_stats(self):
datetime_24hrs_ago = now() - timedelta(hours=24)
orders = Order.objects.all()
orders_last_day = orders.filter(date_placed__gt=datetime_24hrs_ago)
open_alerts = StockAlert.objects.filter(status=StockAlert.OPEN)
closed_alerts = StockAlert.objects.filter(status=StockAlert.CLOSED)
total_lines_last_day = Line.objects.filter(
order__in=orders_last_day).count()
stats = {
'total_orders_last_day': orders_last_day.count(),
'total_lines_last_day': total_lines_last_day,
'average_order_costs': orders_last_day.aggregate(
Avg('total_incl_tax')
)['total_incl_tax__avg'] or D('0.00'),
'total_revenue_last_day': orders_last_day.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.00'),
'hourly_report_dict': self.get_hourly_report(hours=24),
'total_customers_last_day': User.objects.filter(
date_joined__gt=datetime_24hrs_ago,
).count(),
'total_open_baskets_last_day': self.get_open_baskets({
'date_created__gt': datetime_24hrs_ago
}).count(),
'total_products': Product.objects.count(),
'total_open_stock_alerts': open_alerts.count(),
'total_closed_stock_alerts': closed_alerts.count(),
'total_site_offers': self.get_active_site_offers().count(),
'total_vouchers': self.get_active_vouchers().count(),
'total_promotions': self.get_number_of_promotions(),
'total_customers': User.objects.count(),
'total_open_baskets': self.get_open_baskets().count(),
'total_orders': orders.count(),
'total_lines': Line.objects.count(),
'total_revenue': orders.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.00'),
'order_status_breakdown': orders.order_by(
'status'
).values('status').annotate(freq=Count('id'))
}
return stats
class PopUpWindowCreateUpdateMixin(object):
def get_context_data(self, **kwargs):
ctx = super(PopUpWindowCreateUpdateMixin, self).get_context_data(**kwargs)
if RelatedFieldWidgetWrapper.TO_FIELD_VAR in self.request.GET or RelatedFieldWidgetWrapper.TO_FIELD_VAR in self.request.POST:
to_field = self.request.GET.get(RelatedFieldWidgetWrapper.TO_FIELD_VAR,
self.request.POST.get(RelatedFieldWidgetWrapper.TO_FIELD_VAR))
ctx['to_field'] = to_field
ctx['to_field_var'] = RelatedFieldWidgetWrapper.TO_FIELD_VAR
if RelatedFieldWidgetWrapper.IS_POPUP_VAR in self.request.GET or RelatedFieldWidgetWrapper.IS_POPUP_VAR in self.request.POST:
is_popup = self.request.GET.get(RelatedFieldWidgetWrapper.IS_POPUP_VAR,
self.request.POST.get(RelatedFieldWidgetWrapper.IS_POPUP_VAR))
ctx['is_popup'] = is_popup
ctx['is_popup_var'] = RelatedFieldWidgetWrapper.IS_POPUP_VAR
return ctx
def forms_valid(self, form, formset):
# So that base view classes can do pop-up window specific things, like
# not displaying notification messages using the messages framework
self.is_popup = False
if RelatedFieldWidgetWrapper.IS_POPUP_VAR in self.request.POST:
self.is_popup = True
return super(PopUpWindowCreateUpdateMixin, self).forms_valid(form, formset)
class PopUpWindowCreateMixin(PopUpWindowCreateUpdateMixin):
# form_valid and form_invalid are called, depending on the validation
# result of just the form, and return a redirect to the success URL or
# redisplay the form, respectively. In both cases we need to check our
# formsets as well, so both methods should do the same.
# If both the form and formset are valid, then they should call
# forms_valid, which should be defined in the base view class, to
# additionally save the formset and return a redirect to the success URL.
def forms_valid(self, form, formset):
response = super(PopUpWindowCreateMixin, self).forms_valid(form, formset)
if RelatedFieldWidgetWrapper.IS_POPUP_VAR in self.request.POST:
obj = form.instance
to_field = self.request.POST.get(RelatedFieldWidgetWrapper.TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps({
'value': six.text_type(value),
'obj': six.text_type(obj),
})
return TemplateResponse(self.request, 'dashboard/widgets/popup_response.html', {
'popup_response_data': popup_response_data,
})
else:
return response
class PopUpWindowUpdateMixin(PopUpWindowCreateUpdateMixin):
# form_valid and form_invalid are called, depending on the validation
# result of just the form, and return a redirect to the success URL or
# redisplay the form, respectively. In both cases we need to check our
# formsets as well, so both methods should do the same.
# If both the form and formset are valid, then they should call
# forms_valid, which should be defined in the base view class, to
# additionally save the formset and return a redirect to the success URL.
def forms_valid(self, form, formset):
response = super(PopUpWindowUpdateMixin, self).forms_valid(form, formset)
if RelatedFieldWidgetWrapper.IS_POPUP_VAR in self.request.POST:
obj = form.instance
opts = obj._meta
to_field = self.request.POST.get(RelatedFieldWidgetWrapper.TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = opts.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = self.request.resolver_match.kwargs['pk']
new_value = obj.serializable_value(attr)
popup_response_data = json.dumps({
'action': 'change',
'value': six.text_type(value),
'obj': six.text_type(obj),
'new_value': six.text_type(new_value),
})
return TemplateResponse(self.request, 'dashboard/widgets/popup_response.html', {
'popup_response_data': popup_response_data,
})
else:
return response
class PopUpWindowDeleteMixin(object):
def get_context_data(self, **kwargs):
ctx = super(PopUpWindowDeleteMixin, self).get_context_data(**kwargs)
if RelatedFieldWidgetWrapper.IS_POPUP_VAR in self.request.GET:
ctx['is_popup'] = self.request.GET.get(RelatedFieldWidgetWrapper.IS_POPUP_VAR)
ctx['is_popup_var'] = RelatedFieldWidgetWrapper.IS_POPUP_VAR
return ctx
def delete(self, request, *args, **kwargs):
"""
Calls the delete() method on the fetched object and then
redirects to the success URL, or closes the popup, if it is one.
"""
# So that base view classes can do pop-up window specific things, like
# not displaying notification messages using the messages framework
self.is_popup = False
if RelatedFieldWidgetWrapper.IS_POPUP_VAR in self.request.POST:
self.is_popup = True
obj = self.get_object()
response = super(PopUpWindowDeleteMixin, self).delete(request, *args, **kwargs)
if RelatedFieldWidgetWrapper.IS_POPUP_VAR in request.POST:
obj_id = obj.pk
popup_response_data = json.dumps({
'action': 'delete',
'value': six.text_type(obj_id),
})
return TemplateResponse(request, 'dashboard/widgets/popup_response.html', {
'popup_response_data': popup_response_data,
})
else:
return response
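# --- Hedged sketch (not part of the original module) ---
# The y-axis scaling inside IndexView.get_hourly_report() can be hard to
# follow inline, so here is the same arithmetic as a standalone function with
# a worked example. This mirrors the divisor growth, ROUND_UP quantization and
# equal segmentation used above; it is an illustration, not a replacement.
from decimal import Decimal as _D, ROUND_UP as _ROUND_UP

def _sketch_y_axis(max_revenue, segments=10):
    # max_revenue is a Decimal; a zero maximum is handled separately above.
    divisor = 1
    while divisor < max_revenue / 50:
        divisor *= 10
    nice_max = (max_revenue / divisor).quantize(_D('1'), rounding=_ROUND_UP) * divisor
    step = nice_max / _D(str(segments))
    return nice_max, [idx * step for idx in reversed(range(segments + 1))]

# Example: _sketch_y_axis(_D('1234.56')) -> divisor grows to 100, nice_max
# becomes 1300, and the labels run 1300, 1170, ..., 130, 0.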
| 40.705357
| 133
| 0.640638
|
b1ee27e7257a1b5c0e6a0d76ace03f169791658b
| 1,572
|
py
|
Python
|
src/transformers/models/__init__.py
|
JosephCatrambone/transformers
|
eca241861ecadbbe22103d347b51e8df89c46cd3
|
[
"Apache-2.0"
] | 39
|
2021-04-30T06:06:30.000Z
|
2022-03-12T11:56:06.000Z
|
src/transformers/models/__init__.py
|
JosephCatrambone/transformers
|
eca241861ecadbbe22103d347b51e8df89c46cd3
|
[
"Apache-2.0"
] | 3
|
2021-08-19T09:56:35.000Z
|
2021-12-05T20:30:08.000Z
|
src/transformers/models/__init__.py
|
JosephCatrambone/transformers
|
eca241861ecadbbe22103d347b51e8df89c46cd3
|
[
"Apache-2.0"
] | 30
|
2021-04-30T07:11:22.000Z
|
2022-03-15T19:34:58.000Z
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import (
albert,
auto,
bart,
barthez,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
blenderbot,
blenderbot_small,
camembert,
convbert,
cpm,
ctrl,
deberta,
deit,
dialogpt,
distilbert,
dpr,
electra,
encoder_decoder,
flaubert,
fsmt,
funnel,
gpt2,
gpt_neo,
herbert,
layoutlm,
led,
longformer,
luke,
lxmert,
m2m_100,
marian,
mbart,
megatron_bert,
mmbt,
mobilebert,
mpnet,
mt5,
openai,
pegasus,
phobert,
prophetnet,
rag,
reformer,
retribert,
roberta,
speech_to_text,
squeezebert,
t5,
tapas,
transfo_xl,
vit,
wav2vec2,
xlm,
xlm_roberta,
xlnet,
)
| 19.65
| 77
| 0.647583
|
3d9cdee7f07f967e53dfb45d77dfa1f9819237fc
| 1,301
|
py
|
Python
|
script/update_golang_x.py
|
jamestutton/beats
|
913f7eeaa76dcd072854b2696b32cde78763599e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
script/update_golang_x.py
|
jamestutton/beats
|
913f7eeaa76dcd072854b2696b32cde78763599e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
script/update_golang_x.py
|
jamestutton/beats
|
913f7eeaa76dcd072854b2696b32cde78763599e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import json
import os
import argparse
import subprocess
def update(pkg_name):
"""Call govendor on the targeted golang/x packages"""
vendor_file = os.path.join('vendor', 'vendor.json')
target = 'golang.org/x/{}'.format(pkg_name)
with open(vendor_file) as content:
deps = json.load(content)
packages = [dep['path'] for dep in deps['package'] if dep['path'].startswith(target)]
revision = '@{revision}'.format(revision=args.revision) if args.revision else ''
packages = ['{pkg}{revision}'.format(pkg=pkg, revision=revision) for pkg in packages]
cmd = ['govendor', 'fetch'] + packages
if args.verbose:
print(' '.join(cmd))
subprocess.check_call(cmd)
def get_parser():
"""Creates parser to parse script params
"""
parser = argparse.ArgumentParser(description="Update golang.org/x/<name> in vendor folder")
parser.add_argument('-q', '--quiet', dest='verbose', action='store_false', help='work quietly')
parser.add_argument('--revision', help='update deps to this revision', default='')
parser.add_argument('name', help='name of the golang.org/x/ package. Can be empty', default='', nargs='?')
return parser
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
update(args.name)
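# --- Hedged usage note (not part of the original script) ---
# Run from a checkout that contains vendor/vendor.json. An assumed invocation
# (the revision hash is a placeholder):
#
#     python script/update_golang_x.py net --revision 0123abc
#
# which ends up running something like:
#
#     govendor fetch golang.org/x/net@0123abc golang.org/x/net/context@0123abc ...
#
# for every vendored package under golang.org/x/net. Passing no name updates
# every golang.org/x/ package, and -q suppresses the echoed command.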
| 32.525
| 110
| 0.671022
|
13e0eaab364e7e32b87ace24ff7e82603eebf3a2
| 64,587
|
py
|
Python
|
correios/models/data.py
|
Gikeda2016/Correios-olist
|
4906175bad289b3164ebebb73e5ee16450784fac
|
[
"Apache-2.0"
] | null | null | null |
correios/models/data.py
|
Gikeda2016/Correios-olist
|
4906175bad289b3164ebebb73e5ee16450784fac
|
[
"Apache-2.0"
] | null | null | null |
correios/models/data.py
|
Gikeda2016/Correios-olist
|
4906175bad289b3164ebebb73e5ee16450784fac
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from decimal import Decimal
from typing import Dict, Tuple # noqa
from ..utils import RangeSet
TRACKING_PREFIX = {
"AL": "Agentes de leitura",
"AR": "Avisos de recebimento",
"AS": "PAC - Ação Social",
"CA": "Encomenda Internacional - Colis",
"CB": "Encomenda Internacional - Colis",
"CC": "Encomenda Internacional - Colis",
"CD": "Encomenda Internacional - Colis",
"CE": "Encomenda Internacional - Colis",
"CF": "Encomenda Internacional - Colis",
"CG": "Encomenda Internacional - Colis",
"CH": "Encomenda Internacional - Colis",
"CI": "Encomenda Internacional - Colis",
"CJ": "Encomenda Internacional - Colis",
"CK": "Encomenda Internacional - Colis",
"CL": "Encomenda Internacional - Colis",
"CM": "Encomenda Internacional - Colis",
"CN": "Encomenda Internacional - Colis",
"CO": "Encomenda Internacional - Colis",
"CP": "Encomenda Internacional - Colis",
"CQ": "Encomenda Internacional - Colis",
"CR": "Carta registrada sem Valor Declarado",
"CS": "Encomenda Internacional - Colis",
"CT": "Encomenda Internacional - Colis",
"CU": "Encomenda internacional - Colis",
"CV": "Encomenda Internacional - Colis",
"CW": "Encomenda Internacional - Colis",
"CX": "Encomenda internacional - Colis ou Selo Lacre para Caixetas",
"CY": "Encomenda Internacional - Colis",
"CZ": "Encomenda Internacional - Colis",
"DA": "SEDEX ou Remessa Expressa com AR Digital",
"DB": "SEDEX ou Remessa Expressa com AR Digital (Bradesco)",
"DC": "Remessa Expressa CRLV/CRV/CNH e Notificações",
"DD": "Devolução de documentos",
"DE": "Remessa Expressa Talão/Cartão com AR",
"DF": "e-SEDEX",
"DG": "SEDEX",
"DI": "SEDEX ou Remessa Expressa com AR Digital (Itaú)",
"DJ": "SEDEX",
"DK": "PAC Extra Grande",
"DL": "SEDEX",
"DM": "e-SEDEX",
"DN": "SEDEX",
"DO": "SEDEX ou Remessa Expressa com AR Digital (Itaú)",
"DP": "SEDEX Pagamento na Entrega",
"DQ": "SEDEX ou Remessa Expressa com AR Digital (Santander)",
"DR": "Remessa Expressa com AR Digital (Santander)",
"DS": "SEDEX ou Remessa Expressa com AR Digital (Santander)",
"DT": "Remessa econômica com AR Digital (DETRAN)",
"DU": "e-SEDEX",
"DV": "SEDEX c/ AR digital",
"DW": "Encomenda SEDEX (Etiqueta Lógica)",
"DX": "SEDEX 10",
"EA": "Encomenda Internacional - EMS",
"EB": "Encomenda Internacional - EMS",
"EC": "PAC",
"ED": "Packet Express",
"EE": "Encomenda Internacional - EMS",
"EF": "Encomenda Internacional - EMS",
"EG": "Encomenda Internacional - EMS",
"EH": "Encomenda Internacional - EMS ou Encomenda com AR Digital",
"EI": "Encomenda Internacional - EMS",
"EJ": "Encomenda Internacional - EMS",
"EK": "Encomenda Internacional - EMS",
"EL": "Encomenda Internacional - EMS",
"EM": "Encomenda Internacional - SEDEX Mundi", # BR Suffix
# "EM": "Encomenda Internacional - EMS Importação",
"EN": "Encomenda Internacional - EMS",
"EO": "Encomenda Internacional - EMS",
"EP": "Encomenda Internacional - EMS",
"EQ": "Encomenda de serviço não expressa (ECT)",
"ER": "Objeto registrado",
"ES": "e-SEDEX ou EMS",
"ET": "Encomenda Internacional - EMS",
"EU": "Encomenda Internacional - EMS",
"EV": "Encomenda Internacional - EMS",
"EW": "Encomenda Internacional - EMS",
"EX": "Encomenda Internacional - EMS",
"EY": "Encomenda Internacional - EMS",
"EZ": "Encomenda Internacional - EMS",
"FA": "FAC registrado",
"FE": "Encomenda FNDE",
"FF": "Objeto registrado (DETRAN)",
"FH": "FAC registrado com AR Digital",
"FM": "FAC monitorado",
"FR": "FAC registrado",
"IA": "Logística Integrada (agendado / avulso)",
"IC": "Logística Integrada (a cobrar)",
"ID": "Logística Integrada (devolução de documento)",
"IE": "Logística Integrada (Especial)",
"IF": "CPF",
"II": "Logística Integrada (ECT)",
"IK": "Logística Integrada com Coleta Simultânea",
"IM": "Logística Integrada (Medicamentos)",
"IN": "Correspondência e EMS recebido do Exterior",
"IP": "Logística Integrada (Programada)",
"IR": "Impresso Registrado",
"IS": "Logística integrada standard (medicamentos)",
"IT": "Remessa Expressa Medicamentos / Logística Integrada Termolábil",
"IU": "Logística Integrada (urgente)",
"IX": "EDEI Expresso",
"JA": "Remessa econômica com AR Digital",
"JB": "Remessa econômica com AR Digital",
"JC": "Remessa econômica com AR Digital",
"JD": "Remessa econômica Talão/Cartão",
"JE": "Remessa econômica com AR Digital",
"JF": "Remessa econômica com AR Digital",
"JG": "Objeto registrado urgente/prioritário",
"JH": "Objeto registrado urgente / prioritário",
"JI": "Remessa econômica Talão/Cartão",
"JJ": "Objeto registrado (Justiça)",
"JK": "Remessa econômica Talão/Cartão",
"JL": "Objeto registrado",
"JM": "Mala Direta Postal Especial",
"JN": "Objeto registrado econômico",
"JO": "Objeto registrado urgente",
"JP": "Receita Federal",
"JQ": "Remessa econômica com AR Digital",
"JR": "Objeto registrado urgente / prioritário",
"JS": "Objeto registrado",
"JT": "Objeto Registrado Urgente",
"JV": "Remessa Econômica (c/ AR DIGITAL)",
"LA": "SEDEX com Logística Reversa Simultânea em Agência",
"LB": "e-SEDEX com Logística Reversa Simultânea em Agência",
"LC": "Objeto Internacional (Prime)",
"LE": "Logística Reversa Econômica",
"LF": "Objeto Internacional (Prime)",
"LI": "Objeto Internacional (Prime)",
"LJ": "Objeto Internacional (Prime)",
"LK": "Objeto Internacional (Prime)",
"LM": "Objeto Internacional (Prime)",
"LN": "Objeto Internacional (Prime)",
"LP": "PAC com Logística Reversa Simultânea em Agência",
"LS": "SEDEX Logística Reversa",
"LV": "Logística Reversa Expressa",
"LX": "Packet Standard / Econômica",
"LZ": "Objeto Internacional (Prime)",
"MA": "Serviços adicionais do Telegrama",
"MB": "Telegrama (balcão)",
"MC": "Telegrama (Fonado)",
"MD": "SEDEX Mundi (Documento interno)",
"ME": "Telegrama",
"MF": "Telegrama (Fonado)",
"MK": "Telegrama (corporativo)",
"ML": "Fecha Malas (Rabicho)",
"MM": "Telegrama (Grandes clientes)",
"MP": "Telegrama (Pré-pago)",
"MR": "AR digital",
"MS": "Encomenda Saúde",
"MT": "Telegrama (Telemail)",
"MY": "Telegrama internacional (entrante)",
"MZ": "Telegrama (Correios Online)",
"NE": "Tele Sena resgatada",
"NX": "EDEI Econômico (não urgente)",
"PA": "Passaporte",
"PB": "PAC",
"PC": "PAC a Cobrar",
"PD": "PAC",
"PE": "PAC",
"PF": "Passaporte",
"PG": "PAC",
"PH": "PAC",
"PI": "PAC",
"PJ": "PAC",
"PK": "PAC Extra Grande",
"PL": "PAC",
"PN": "PAC Normal",
"PR": "Reembolso Postal",
"QQ": "Objeto de teste (SIGEP Web)",
"RA": "Objeto registrado / prioritário",
"RB": "Carta registrada",
"RC": "Carta registrada com Valor Declarado",
"RD": "Remessa econômica ou objeto registrado (DETRAN)",
"RE": "Objeto registrado econômico",
"RF": "Receita Federal",
"RG": "Objeto registrado",
"RH": "Objeto registrado com AR Digital",
"RI": "Objeto registrado internacional prioritário",
"RJ": "Objeto registrado",
"RK": "Objeto registrado",
"RL": "Objeto registrado",
"RM": "Objeto registrado urgente",
"RN": "Objeto registrado (SIGEPWEB ou Agência)",
"RO": "Objeto registrado",
"RP": "Reembolso Postal",
"RQ": "Objeto registrado",
"RR": "Objeto registrado",
"RS": "Objeto registrado",
"RT": "Remessa econômica Talão/Cartão",
"RU": "Objeto registrado (ECT)",
"RV": "Remessa econômica CRLV/CRV/CNH e Notificações com AR Digital",
"RW": "Objeto internacional",
"RX": "Objeto internacional",
"RY": "Remessa econômica Talão/Cartão com AR Digital",
"RZ": "Objeto registrado",
"SA": "SEDEX",
"SB": "SEDEX 10",
"SC": "SEDEX a cobrar",
"SD": "SEDEX ou Remessa Expressa (DETRAN)",
"SE": "SEDEX",
"SF": "SEDEX",
"SG": "SEDEX",
"SH": "SEDEX com AR Digital / SEDEX ou AR Digital",
"SI": "SEDEX",
"SJ": "SEDEX Hoje",
"SK": "SEDEX",
"SL": "SEDEX",
"SM": "SEDEX 12",
"SN": "SEDEX",
"SO": "SEDEX",
"SP": "SEDEX Pré-franqueado",
"SQ": "SEDEX",
"SR": "SEDEX",
"SS": "SEDEX",
"ST": "Remessa Expressa Talão/Cartão",
"SU": "Encomenda de serviço expressa (ECT)",
"SV": "Remessa Expressa CRLV/CRV/CNH e Notificações com AR Digital",
"SW": "e-SEDEX",
"SX": "SEDEX 10",
"SY": "Remessa Expressa Talão/Cartão com AR Digital",
"SZ": "SEDEX",
"TC": "Objeto para treinamento",
"TE": "Objeto para treinamento",
"TS": "Objeto para treinamento",
"VA": "Encomendas com valor declarado",
"VC": "Encomendas",
"VD": "Encomendas com valor declarado",
"VE": "Encomendas",
"VF": "Encomendas com valor declarado",
"VV": "Objeto internacional",
"XA": "Aviso de chegada (internacional)",
"XM": "SEDEX Mundi",
"XR": "Encomenda SUR Postal Expresso",
"XX": "Encomenda SUR Postal 24 horas",
}
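# --- Hedged sketch (not part of the original module) ---
# Correios tracking codes follow the UPU S10 shape: a two-letter service
# prefix, nine digits and a two-letter country code, e.g. "PJ123456789BR".
# The dictionary above keys on the service prefix, so a lookup helper could
# look like this (illustration only; the real models in this package may
# expose something richer):
def _sketch_tracking_service(code):
    prefix = code[:2].upper()
    return TRACKING_PREFIX.get(prefix, "Unknown prefix: {}".format(prefix))

# _sketch_tracking_service("PJ123456789BR") -> "PAC"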
EXTRA_SERVICES = {
1: {"code": "AR", "name": "Aviso de Recebimento", "display_on_label": True},
2: {"code": "MP", "name": "Mão Própria Nacional", "display_on_label": True},
19: {"code": "VD", "name": "Valor Declarado (Encomendas)", "display_on_label": True}, # Sedex
25: {"code": "RR", "name": "Registro Nacional", "display_on_label": False},
64: {"code": "VD", "name": "Valor Declarado (Encomendas)", "display_on_label": True}, # PAC
}
EXTRA_SERVICE_AR = 1
EXTRA_SERVICE_MP = 2
EXTRA_SERVICE_VD_SEDEX = 19
EXTRA_SERVICE_RR = 25
EXTRA_SERVICE_VD_PAC = 64
EXTRA_SERVICE_VD = EXTRA_SERVICE_VD_SEDEX # For backward compatibility
SERVICES = {
"40215": {
"id": 104707,
"description": "SEDEX 10",
"category": "SERVICO_COM_RESTRICAO",
"max_weight": 10000,
"display_name": "SEDEX 10",
"symbol": "premium",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"81019": {
"id": 104672,
"description": "E-SEDEX STANDARD",
"category": "SERVICO_COM_RESTRICAO",
"max_weight": 15000,
"display_name": "E-SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"41068": {
"id": 109819,
"description": "PAC CONTRATO AGENCIA",
"category": "PAC",
"display_name": "PAC",
"max_weight": 30000,
"symbol": "standard",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("3000.00"),
},
"04669": {
"id": 124884,
"description": "PAC",
"category": "PAC",
"display_name": "PAC",
"max_weight": 30000,
"symbol": "standard",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("3000.00"),
},
"40444": {
"id": 109811,
"description": "SEDEX - CONTRATO",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"40436": {
"id": 109810,
"description": "SEDEX - CONTRATO",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"40096": {
"id": 104625,
"description": "SEDEX (CONTRATO)",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"04162": {
"id": 124849,
"description": "SEDEX CONTRATO AGENCIA",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"40380": {
"id": 109806,
"description": "SEDEX REVERSO 40096",
"category": "REVERSO",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"40010": {
"id": 104295,
"description": "SEDEX A VISTA",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"41211": {
"id": 113546,
"description": "PAC - CONTRATO",
"category": "PAC",
"display_name": "PAC",
"max_weight": 30000,
"symbol": "standard",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("3000.00"),
},
"40630": {
"id": 114976,
"description": "SEDEX PAGAMENTO NA ENTREGA -",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"04316": {
"id": 124900,
"description": "SEDEX CONTRATO - UO",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"40916": {
"id": 118568,
"description": "SEDEX AGRUPADO II",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"40908": {
"id": 118567,
"description": "SEDEX AGRUPADO I",
"category": "SEDEX",
"max_weight": 30000,
"display_name": "SEDEX",
"symbol": "express",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"41300": {
"id": 120366,
"description": "PAC GRANDES FORMATOS",
"category": "SERVICO_COM_RESTRICAO",
"max_weight": 50000,
"display_name": "PAC",
"symbol": "standard",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("3000.00"),
},
"04812": {
"id": 124899,
"description": "PAC CONTRATO - UO",
"category": "PAC",
"display_name": "PAC",
"max_weight": 30000,
"symbol": "standard",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("3000.00"),
},
"40169": {
"id": 115218,
"description": "SEDEX 12",
"category": "SERVICO_COM_RESTRICAO",
"max_weight": 10000,
"display_name": "SEDEX 12",
"symbol": "premium",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"40290": {
"id": 108934,
"description": "SEDEX HOJE",
"category": "SERVICO_COM_RESTRICAO",
"max_weight": 10000,
"display_name": "SEDEX Hoje",
"symbol": "premium",
"default_extra_services": [EXTRA_SERVICE_RR],
"min_declared_value": Decimal("19.50"),
"max_declared_value": Decimal("10000.00"),
},
"10154": {
"id": 118424,
"description": "CARTA COMERCIAL REGISTRADA",
"category": "CARTA_REGISTRADA",
"display_name": "Carta Registrada",
},
"41246": {
"id": 115487,
"description": "REM. CAMPANHA PAPAI NOEL DOS",
"category": "SEM_CATEGORIA",
"display_name": "Papai Noel dos Correios",
},
"40150": {
"id": 115136,
"description": "SERVICO DE PROTOCOLO POSTAL -",
"category": "SEDEX",
"display_name": "Protocolo",
},
"10065": {
"id": 109480,
"description": "CARTA COMERCIAL A FATURAR",
"category": "CARTA_REGISTRADA",
"display_name": "Carta Comercial",
},
} # type: Dict[str, dict]
SERVICE_PAC = "04669"
SERVICE_PAC_INDUSTRIAL = "04812"
SERVICE_SEDEX = "04162"
SERVICE_SEDEX_INDUSTRIAL = "04316"
SERVICE_SEDEX10 = "40215"
SERVICE_SEDEX12 = "40169"
SERVICE_E_SEDEX = "81019"
INSURANCE_VALUE_THRESHOLD_PAC = Decimal("19.50") # R$
INSURANCE_VALUE_THRESHOLD_SEDEX = Decimal("19.50") # R$
INSURANCE_PERCENTUAL_COST = Decimal("0.01") # 1%
REGIONAL_DIRECTIONS = {
1: {"code": "AC", "name": "AC - ADMINISTRAÇAO CENTRAL"},
3: {"code": "ACR", "name": "DR - ACRE"},
4: {"code": "AL", "name": "DR - ALAGOAS"},
6: {"code": "AM", "name": "DR - AMAZONAS"},
5: {"code": "AP", "name": "DR - AMAPÁ"},
8: {"code": "BA", "name": "DR - BAHIA"},
10: {"code": "BSB", "name": "DR - BRASÍLIA"},
12: {"code": "CE", "name": "DR - CEARÁ"},
14: {"code": "ES", "name": "DR - ESPIRITO SANTO"},
16: {"code": "GO", "name": "DR - GOIÁS"},
18: {"code": "MA", "name": "DR - MARANHÃO"},
20: {"code": "MG", "name": "DR - MINAS GERAIS"},
22: {"code": "MS", "name": "DR - MATO GROSSO DO SUL"},
24: {"code": "MT", "name": "DR - MATO GROSSO"},
28: {"code": "PA", "name": "DR - PARÁ"},
30: {"code": "PB", "name": "DR - PARAÍBA"},
32: {"code": "PE", "name": "DR - PERNAMBUCO"},
34: {"code": "PI", "name": "DR - PIAUÍ"},
36: {"code": "PR", "name": "DR - PARANÁ"},
50: {"code": "RJ", "name": "DR - RIO DE JANEIRO"},
60: {"code": "RN", "name": "DR - RIO GRANDE DO NORTE"},
26: {"code": "RO", "name": "DR - RONDONIA"},
65: {"code": "RR", "name": "DR - RORAIMA"},
64: {"code": "RS", "name": "DR - RIO GRANDE DO SUL"},
68: {"code": "SC", "name": "DR - SANTA CATARINA"},
70: {"code": "SE", "name": "DR - SERGIPE"},
74: {"code": "SPI", "name": "DR - SÃO PAULO INTERIOR"},
72: {"code": "SPM", "name": "DR - SÃO PAULO"},
75: {"code": "TO", "name": "DR - TOCANTINS"},
}
TRACKING_EVENT_TYPES = {
"ERROR": "Evento de erro", # custom event type for "Not Found" error
"BDE": "Baixa de distribuição externa",
"BDI": "Baixa de distribuição interna",
"BDR": "Baixa corretiva",
"BLQ": "Bloqueio de objetos",
"CAR": "Conferência de lista de registro",
"CD": "Conferência de nota de despacho",
"CMT": "Chegada de um meio de transporte",
"CO": "Coleta de objetos",
"CUN": "Conferência de lista de registro",
"DO": "Expedição de nota de despacho",
"EST": "Estorno",
"FC": "Função complementar",
"IDC": "Indenização de objetos",
"LDI": "Lista de distribuição interna",
"LDE": "Lista de distribuição externa",
"OEC": "Lista de Objetos Entregues ao Carteiro",
"PAR": "Conferência Unidade Internacional",
"PMT": "Partida Meio de Transporte",
"PO": "Postagem (exceção)",
"RO": "Expedição de Lista de Registro",
"TRI": "Triagem",
"CMR": "Conferência de lista de registro",
}
TRACKING_STATUS = {
("ERROR", 0): ("error", "Objeto não encontrado", "", ""), # custom status for "Not Found" error
("BDE", 0): (
"delivered",
"Objeto entregue ao destinatário",
"Recebido por:",
"Finalizar a entrega. Não é mais necessário prosseguir com o acompanhamento.",
),
("BDI", 0): (
"delivered",
"Objeto entregue ao destinatário",
"Recebido por:",
"Finalizar a entrega. Não é mais necessário prosseguir com o acompanhamento.",
),
("BDR", 0): (
"delivered",
"Objeto entregue ao destinatário",
"Recebido por:",
"Finalizar a entrega. Não é mais necessário prosseguir com o acompanhamento.",
),
("CAR", 1): ("shipped", "Conferido", "Recebido na unidade de destino", "Acompanhar"),
("CD", 0): ("shipped", "Objeto recebido na Unidade dos Correios", "", "Acompanhar"),
("CMT", 0): ("shipped", "Objeto recebido na Unidade dos Correios", "", "Acompanhar"),
("CUN", 0): ("shipped", "Objeto recebido na Unidade dos Correios", "", "Acompanhar"),
("DO", 0): ("shipped", "Objeto encaminhado para", "<nome da cidade>", "Acompanhar"),
("LDE", 0): ("shipped", "Objeto saiu para entrega ao remetente", "", "Acompanhar"),
("LDI", 0): (
"waiting_retrieval",
"Objeto aguardando retirada no endereço indicado",
"Endereço:",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("OEC", 0): (
"shipped",
"Objeto saiu para entrega ao destinatário",
"",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("PO", 0): ("shipped", "Objeto postado", "", "Acompanhar"),
("RO", 0): ("shipped", "Objeto encaminhado para", "<nome da cidade>", "Acompanhar"),
("TRI", 0): ("shipped", "Objeto encaminhado para", "<nome da cidade>", "Acompanhar"),
("BDE", 1): (
"delivered",
"Objeto entregue ao destinatário",
"Recebido por:",
"Finalizar a entrega. Não é mais necessário prosseguir com o acompanhamento.",
),
("BDI", 1): (
"delivered",
"Objeto entregue ao destinatário",
"Recebido por:",
"Finalizar a entrega. Não é mais necessário prosseguir com o acompanhamento.",
),
("BDR", 1): (
"delivered",
"Objeto entregue ao destinatário",
"Recebido por:",
"Finalizar a entrega. Não é mais necessário prosseguir com o acompanhamento.",
),
("BLQ", 1): (
"shipped",
"Entrega de objeto bloqueada a pedido do remetente",
"Objeto em análise de destinação",
"Acompanhar",
),
("BLQ", 2): ("shipped", "Tentativa de suspensão da entrega", "Objeto em análise de destinação", "Acompanhar"),
("CD", 1): ("shipped", "Objeto recebido na Unidade dos Correios", "", "Acompanhar"),
("CO", 1): ("shipped", "Objeto coletado", "", "Acompanhar"),
("CUN", 1): ("shipped", "Objeto recebido na Unidade dos Correios", "", "Acompanhar"),
("DO", 1): ("shipped", "Objeto encaminhado para", "<nome da cidade>", "Acompanhar"),
("EST", 1): ("ignore_last_entry", "Favor desconsiderar a informação anterior", "", "Acompanhar"),
("FC", 1): (
"shipped",
"Objeto será devolvido por solicitação do remetente",
"",
"Acompanhar o retorno do objeto ao remetente.",
),
("FC", 10): (
"shipped",
"Objeto recebido na unidade de distribuição",
"Entrega prevista para o próximo dia útil",
"Acompanhar",
),
("FC", 47): (
"shipped",
"Objeto será devolvido por solicitação do contratante/remetente",
"Em tratamento, aguarde.",
"Acompanhar",
),
("IDC", 1): ("lost", "Objeto não localizado", "Houve indenização dos valores correspondentes", "Acompanhar"),
("LDI", 1): (
"waiting_retrieval",
"Objeto aguardando retirada no endereço indicado",
"Endereço:",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("OEC", 1): ("shipped", "Objeto saiu para entrega ao destinatário", "", "Acompanhar"),
("PMT", 1): ("shipped", "Objeto encaminhado para", "<nome da cidade>", "Acompanhar"),
("PO", 1): ("shipped", "Objeto postado", "", "Acompanhar"),
("RO", 1): ("shipped", "Objeto encaminhado para", "<nome da cidade>", "Acompanhar"),
("BDE", 2): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Aguarde: Objeto estará disponível para retirada na unidade a ser informada.",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("BDI", 2): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Aguarde: Objeto estará disponível para retirada na unidade a ser informada.",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("BDR", 2): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Aguarde: Objeto estará disponível para retirada na unidade a ser informada.",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("CD", 2): ("shipped", "Objeto recebido na Unidade dos Correios", "", "Acompanhar"),
("DO", 2): ("shipped", "Objeto encaminhado para", "<nome da cidade>", "Acompanhar"),
("EST", 2): ("ignore_last_entry", "Favor desconsiderar a informação anterior", "", "Acompanhar"),
("FC", 2): ("shipped", "Objeto com data de entrega agendada", "", "Acompanhar"),
("IDC", 2): ("lost", "Objeto não localizado", "Houve indenização dos valores correspondentes", "Acompanhar"),
("LDI", 2): (
"waiting_retrieval",
"Objeto disponível para retirada em Caixa Postal",
"",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("BDE", 3): (
"waiting_retrieval",
"Remetente não retirou objeto na Unidade dos Correios",
"Objeto em análise de destinação",
"Acompanhar. O interessado não buscou o objeto na unidade dos Correios durante o período de guarda.",
),
("BDI", 3): (
"waiting_retrieval",
"Remetente não retirou objeto na Unidade dos Correios",
"Objeto em análise de destinação",
"Acompanhar. O interessado não buscou o objeto na unidade dos Correios durante o período de guarda.",
),
("BDR", 3): (
"waiting_retrieval",
"Remetente não retirou objeto na Unidade dos Correios",
"Objeto em análise de destinação",
"Acompanhar. O interessado não buscou o objeto na unidade dos Correios durante o período de guarda.",
),
("CD", 3): ("shipped", "Objeto recebido na Unidade dos Correios", "", "Acompanhar"),
("EST", 3): ("ignore_last_entry", "Favor desconsiderar a informação anterior", "", "Acompanhar"),
("FC", 3): ("shipped", "Objeto mal encaminhado", "Encaminhamento a ser corrigido.", "Acompanhar"),
("IDC", 3): ("lost", "Objeto não localizado", "Houve indenização dos valores correspondentes", "Acompanhar"),
("LDI", 3): (
"waiting_retrieval",
"Objeto aguardando retirada no endereço indicado",
"Endereço:",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("BDE", 4): (
"delivery_rejected",
"A entrega não pode ser efetuada - Cliente recusou-se a receber",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 4): (
"delivery_rejected",
"A entrega não pode ser efetuada - Cliente recusou-se a receber",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 4): (
"delivery_rejected",
"A entrega não pode ser efetuada - Cliente recusou-se a receber",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("EST", 4): ("ignore_last_entry", "Favor desconsiderar a informação anterior", "", "Acompanhar"),
("FC", 4): (
"shipped",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar",
),
("IDC", 4): ("lost", "Objeto não localizado", "Houve indenização dos valores correspondentes", "Acompanhar"),
("BDE", 5): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 5): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 5): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("EST", 5): ("ignore_last_entry", "Favor desconsiderar a informação anterior", "", "Acompanhar"),
("FC", 5): ("shipped", "Objeto devolvido aos Correios", "", "Acompanhar"),
("IDC", 5): ("lost", "Objeto não localizado", "Houve indenização dos valores correspondentes", "Acompanhar"),
("BDE", 6): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Cliente desconhecido no local",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 6): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Cliente desconhecido no local",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 6): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Cliente desconhecido no local",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("EST", 6): ("ignore_last_entry", "Favor desconsiderar a informação anterior", "", "Acompanhar"),
("IDC", 6): ("lost", "Objeto não localizado", "Houve indenização dos valores correspondentes", "Acompanhar"),
("BDE", 7): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar",
),
("BDI", 7): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar",
),
("BDR", 7): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar",
),
("FC", 7): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Empresa sem expediente",
"A entrega deverá ocorrer no próximo dia útil",
"Acompanhar",
),
("IDC", 7): ("lost", "Objeto não localizado", "Houve indenização dos valores correspondentes", "Acompanhar"),
("BDE", 8): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 8): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 8): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("FC", 8): (
"shipped",
"Área com distribuição sujeita a prazo diferenciado",
"Restrição de entrega domiciliar temporária",
"Acompanhar",
),
("BDE", 9): (
"lost",
"Objeto não localizado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDI", 9): (
"lost",
"Objeto não localizado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDR", 9): (
"lost",
"Objeto não localizado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("EST", 9): ("ignore_last_entry", "Favor desconsiderar a informação anterior", "", "Acompanhar"),
("FC", 9): ("delivery_unsuccessful", "Remetente não retirou objeto na Unidade dos Correios", "", "Acompanhar"),
("LDE", 9): ("shipped", "Objeto saiu para entrega ao remetente", "", "Acompanhar"),
("OEC", 9): ("shipped", "Objeto saiu para entrega ao remetente", "", "Acompanhar"),
("PO", 9): (
"shipped",
"Objeto postado após o horário limite da agência",
"Objeto sujeito a encaminhamento no próximo dia útil",
"Acompanhar",
),
("BDE", 10): (
"shipped_wrong_address",
"A entrega não pode ser efetuada - Cliente mudou-se",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 10): (
"shipped_wrong_address",
"A entrega não pode ser efetuada - Cliente mudou-se",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 10): (
"shipped_wrong_address",
"A entrega não pode ser efetuada - Cliente mudou-se",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 12): (
"waiting_retrieval",
"Remetente não retirou objeto na Unidade dos Correios",
"Objeto em análise de destinação",
"Acionar atendimento dos Correios.",
),
("BDI", 12): (
"waiting_retrieval",
"Remetente não retirou objeto na Unidade dos Correios",
"Objeto em análise de destinação",
"Acionar atendimento dos Correios.",
),
("BDR", 12): (
"waiting_retrieval",
"Remetente não retirou objeto na Unidade dos Correios",
"Objeto em análise de destinação",
"Acionar atendimento dos Correios.",
),
("LDI", 4): (
"waiting_retrieval",
"Objeto aguardando retirada no endereço indicado",
"Endereço:",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("LDI", 14): (
"waiting_retrieval",
"Objeto aguardando retirada no endereço indicado",
"Endereço:",
"Acompanhar. O interessado deverá buscar o objeto em uma Unidade dos Correios.",
),
("BDI", 14): ("shipped", "Desistência de postagem pelo remetente", "", "Acompanhar"),
("BDR", 14): ("shipped", "Desistência de postagem pelo remetente", "", "Acompanhar"),
("BDR", 15): (
"shipped",
"Recebido na unidade de distribuição",
"Por determinação judicial o objeto será entregue em até 7 dias",
"Acompanhar",
),
("PAR", 15): ("delivered", "Objeto recebido em <destino>", "", "Acompanhar"),
("PAR", 16): (
"customs_control",
"Objeto recebido no Brasil",
"Objeto sujeito à fiscalização e atraso na entrega",
"Acompanhar",
),
("PAR", 17): ("customs_control", "Objeto liberado pela alfândega", "", "Acompanhar"),
("PAR", 18): ("shipped", "Objeto recebido na unidade de exportação", "", "Acompanhar"),
("BDE", 18): (
"shipped",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Será realizada nova tentativa de entrega no sábado",
"Acompanhar",
),
("BDR", 18): (
"shipped",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Será realizada nova tentativa de entrega no sábado",
"Acompanhar",
),
("BDE", 19): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 19): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 19): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Endereço incorreto",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 20): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Será realizada nova tentativa de entrega",
"Acompanhar",
),
("BDI", 20): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Será realizada nova tentativa de entrega",
"Acompanhar",
),
("BDR", 20): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Será realizada nova tentativa de entrega",
"Acompanhar",
),
("BDE", 21): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 21): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 21): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Carteiro não atendido",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 22): ("shipped", "Objeto devolvido aos Correios", "", "Acompanhar"),
("BDI", 22): ("shipped", "Objeto devolvido aos Correios", "", "Acompanhar"),
("BDR", 22): ("shipped", "Objeto devolvido aos Correios", "", "Acompanhar"),
("BDE", 23): ("returned", "Objeto devolvido ao remetente", "Recebido por:", "Acompanhar"),
("BDI", 23): ("returned", "Objeto devolvido ao remetente", "Recebido por:", "Acompanhar"),
("BDR", 23): ("returned", "Objeto devolvido ao remetente", "Recebido por:", "Acompanhar"),
("BDE", 24): ("waiting_retrieval", "Objeto disponível para retirada em Caixa Postal", "", "Acompanhar"),
("BDI", 24): ("waiting_retrieval", "Objeto disponível para retirada em Caixa Postal", "", "Acompanhar"),
("BDR", 24): ("waiting_retrieval", "Objeto disponível para retirada em Caixa Postal", "", "Acompanhar"),
("BDE", 25): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Empresa sem expediente",
"A entrega deverá ocorrer no próximo dia útil",
"Acompanhar",
),
("BDI", 25): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Empresa sem expediente",
"A entrega deverá ocorrer no próximo dia útil",
"Acompanhar",
),
("BDR", 25): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Empresa sem expediente",
"A entrega deverá ocorrer no próximo dia útil",
"Acompanhar",
),
("BDE", 26): (
"waiting_retrieval",
"Destinatário não retirou objeto na Unidade dos Correios",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 26): (
"waiting_retrieval",
"Destinatário não retirou objeto na Unidade dos Correios",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 26): (
"waiting_retrieval",
"Destinatário não retirou objeto na Unidade dos Correios",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 28): (
"damaged",
"Objeto e/ou conteúdo avariado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDI", 28): (
"damaged",
"Objeto e/ou conteúdo avariado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDR", 28): (
"damaged",
"Objeto e/ou conteúdo avariado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDE", 30): ("delivery_unsuccessful", "Saída não efetuada", "Em tratamento, aguarde.", "Acompanhar"),
("BDI", 30): ("delivery_unsuccessful", "Saída não efetuada", "Em tratamento, aguarde.", "Acompanhar"),
("BDR", 30): ("delivery_unsuccessful", "Saída não efetuada", "Em tratamento, aguarde.", "Acompanhar"),
("BDE", 32): ("shipped", "Objeto com data de entrega agendada", "", "Acompanhar"),
("BDI", 32): ("shipped", "Objeto com data de entrega agendada", "", "Acompanhar"),
("BDR", 32): ("shipped", "Objeto com data de entrega agendada", "", "Acompanhar"),
("BDE", 33): (
"delivery_rejected",
"A entrega não pode ser efetuada - Destinatário não apresentou documento exigido",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 33): (
"delivery_rejected",
"A entrega não pode ser efetuada - Destinatário não apresentou documento exigido",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 33): (
"delivery_rejected",
"A entrega não pode ser efetuada - Destinatário não apresentou documento exigido",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 34): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Logradouro com numeração irregular",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar",
),
("BDI", 34): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Logradouro com numeração irregular",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar",
),
("BDR", 34): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada - Logradouro com numeração irregular",
"Objeto sujeito a atraso na entrega ou a devolução ao remetente",
"Acompanhar",
),
("BDE", 35): (
"delivery_unsuccessful",
"Coleta ou entrega de objeto não efetuada",
"Será realizada nova tentativa de coleta ou entrega",
"Acompanhar",
),
("BDI", 35): (
"delivery_unsuccessful",
"Coleta ou entrega de objeto não efetuada",
"Será realizada nova tentativa de coleta ou entrega",
"Acompanhar",
),
("BDR", 35): (
"delivery_unsuccessful",
"Coleta ou entrega de objeto não efetuada",
"Será realizada nova tentativa de coleta ou entrega",
"Acompanhar",
),
("BDE", 36): (
"delivery_unsuccessful",
"Coleta ou entrega de objeto não efetuada",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 36): (
"delivery_unsuccessful",
"Coleta ou entrega de objeto não efetuada",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 36): (
"delivery_unsuccessful",
"Coleta ou entrega de objeto não efetuada",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 37): (
"damaged",
"Objeto e/ou conteúdo avariado por acidente com veículo",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDI", 37): (
"damaged",
"Objeto e/ou conteúdo avariado por acidente com veículo",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDR", 37): (
"damaged",
"Objeto e/ou conteúdo avariado por acidente com veículo",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDE", 38): (
"delivery_unsuccessful",
"Objeto endereçado à empresa falida",
"Objeto será encaminhado para entrega ao administrador judicial",
"Acompanhar",
),
("BDI", 38): (
"delivery_unsuccessful",
"Objeto endereçado à empresa falida",
"Objeto será encaminhado para entrega ao administrador judicial",
"Acompanhar",
),
("BDR", 38): (
"delivery_unsuccessful",
"Objeto endereçado à empresa falida",
"Objeto será encaminhado para entrega ao administrador judicial",
"Acompanhar",
),
("BDI", 39): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada",
"Objeto em análise de destinação",
"Acompanhar",
),
("BDR", 39): (
"delivery_unsuccessful",
"A entrega não pode ser efetuada",
"Objeto em análise de destinação",
"Acompanhar",
),
("BDE", 40): (
"customs_control",
"A importação do objeto/conteúdo não foi autorizada pelos órgãos fiscalizadores",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 40): (
"customs_control",
"A importação do objeto/conteúdo não foi autorizada pelos órgãos fiscalizadores",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 40): (
"customs_control",
"A importação do objeto/conteúdo não foi autorizada pelos órgãos fiscalizadores",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 41): ("unknown", "A entrega do objeto está condicionada à composição do lote", "", "Acompanhar"),
("BDI", 41): ("unknown", "A entrega do objeto está condicionada à composição do lote", "", "Acompanhar"),
("BDR", 41): ("unknown", "A entrega do objeto está condicionada à composição do lote", "", "Acompanhar"),
("BDE", 42): (
"unknown",
"Lote de objetos incompleto",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 42): (
"unknown",
"Lote de objetos incompleto",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 42): (
"unknown",
"Lote de objetos incompleto",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 43): (
"customs_control",
"Objeto apreendido por órgão de fiscalização ou outro órgão anuente",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDI", 43): (
"customs_control",
"Objeto apreendido por órgão de fiscalização ou outro órgão anuente",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDR", 43): (
"customs_control",
"Objeto apreendido por órgão de fiscalização ou outro órgão anuente",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDR", 75): (
"customs_control",
"Objeto apreendido por: POLICIA FEDERAL",
"O objeto está em poder da autoridade competente",
"Acionar atendimento dos Correios.",
),
("BDE", 45): (
"shipped",
"Objeto recebido na unidade de distribuição",
"Entrega prevista para o próximo dia útil",
"Acompanhar",
),
("BDI", 45): (
"shipped",
"Objeto recebido na unidade de distribuição",
"Entrega prevista para o próximo dia útil",
"Acompanhar",
),
("BDR", 45): (
"shipped",
"Objeto recebido na unidade de distribuição",
"Entrega prevista para o próximo dia útil",
"Acompanhar",
),
("BDE", 46): (
"delivery_unsuccessful",
"Tentativa de entrega não efetuada",
"Entrega prevista para o próximo dia útil",
"Acompanhar",
),
("BDI", 46): (
"delivery_unsuccessful",
"Tentativa de entrega não efetuada",
"Entrega prevista para o próximo dia útil",
"Acompanhar",
),
("BDR", 46): (
"delivery_unsuccessful",
"Tentativa de entrega não efetuada",
"Entrega prevista para o próximo dia útil",
"Acompanhar",
),
("BDE", 47): (
"delivery_unsuccessful",
"Saída para entrega cancelada",
"Será efetuado novo lançamento para entrega",
"Acompanhar",
),
("BDI", 47): (
"delivery_unsuccessful",
"Saída para entrega cancelada",
"Será efetuado novo lançamento para entrega",
"Acompanhar",
),
("BDR", 47): (
"delivery_unsuccessful",
"Saída para entrega cancelada",
"Será efetuado novo lançamento para entrega",
"Acompanhar",
),
("BDE", 48): (
"delivery_rejected",
"Retirada em Unidade dos Correios não autorizada pelo remetente",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 48): (
"delivery_rejected",
"Retirada em Unidade dos Correios não autorizada pelo remetente",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 48): (
"delivery_rejected",
"Retirada em Unidade dos Correios não autorizada pelo remetente",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 49): (
"delivery_unsuccessful",
"As dimensões do objeto impossibilitam o tratamento e a entrega",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 49): (
"delivery_unsuccessful",
"As dimensões do objeto impossibilitam o tratamento e a entrega",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 49): (
"delivery_unsuccessful",
"As dimensões do objeto impossibilitam o tratamento e a entrega",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 50): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDI", 50): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDR", 50): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDE", 51): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDI", 51): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDR", 51): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDE", 52): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDI", 52): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDR", 52): (
"lost",
"Objeto roubado",
"Favor entrar em contato com os Correios.",
"Acionar atendimento dos Correios.",
),
("BDE", 53): (
"shipped",
"Objeto reimpresso e reenviado",
"",
"Acompanhar. O objeto impresso pelos Correios precisou ser refeito e reenviado.",
),
("BDI", 53): (
"shipped",
"Objeto reimpresso e reenviado",
"",
"Acompanhar. O objeto impresso pelos Correios precisou ser refeito e reenviado.",
),
("BDR", 53): (
"shipped",
"Objeto reimpresso e reenviado",
"",
"Acompanhar. O objeto impresso pelos Correios precisou ser refeito e reenviado.",
),
("BDE", 54): (
"customs_control",
"Para recebimento do objeto, é necessário o pagamento do ICMS Importação",
"",
"Acompanhar. O interessado deverá pagar o imposto devido para retirar o objeto em uma Unidade dos Correios.",
),
("BDI", 54): (
"customs_control",
"Para recebimento do objeto, é necessário o pagamento do ICMS Importação",
"",
"Acompanhar. O interessado deverá pagar o imposto devido para retirar o objeto em uma Unidade dos Correios.",
),
("BDR", 54): (
"customs_control",
"Para recebimento do objeto, é necessário o pagamento do ICMS Importação",
"",
"Acompanhar. O interessado deverá pagar o imposto devido para retirar o objeto em uma Unidade dos Correios.",
),
("BDE", 55): ("customs_control", "Solicitada revisão do tributo estabelecido", "", "Acompanhar"),
("BDI", 55): ("customs_control", "Solicitada revisão do tributo estabelecido", "", "Acompanhar"),
("BDR", 55): ("customs_control", "Solicitada revisão do tributo estabelecido", "", "Acompanhar"),
("BDE", 56): (
"customs_control",
"Declaração aduaneira ausente ou incorreta",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDI", 56): (
"customs_control",
"Declaração aduaneira ausente ou incorreta",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDR", 56): (
"customs_control",
"Declaração aduaneira ausente ou incorreta",
"Objeto será devolvido ao remetente",
"Acompanhar o retorno do objeto ao remetente.",
),
("BDE", 57): ("customs_control", "Revisão de tributo concluída - Objeto liberado", "", "Acompanhar"),
("BDI", 57): ("customs_control", "Revisão de tributo concluída - Objeto liberado", "", "Acompanhar"),
("BDR", 57): ("customs_control", "Revisão de tributo concluída - Objeto liberado", "", "Acompanhar"),
("BDE", 58): (
"customs_control",
"Revisão de tributo concluída - Tributo alterado",
"O valor do tributo pode ter aumentado ou diminuído",
"Acompanhar",
),
("BDI", 58): (
"customs_control",
"Revisão de tributo concluída - Tributo alterado",
"O valor do tributo pode ter aumentado ou diminuído",
"Acompanhar",
),
("BDR", 58): (
"customs_control",
"Revisão de tributo concluída - Tributo alterado",
"O valor do tributo pode ter aumentado ou diminuído",
"Acompanhar",
),
("BDE", 59): (
"customs_control",
"Revisão de tributo concluída - Tributo mantido",
"Poderá haver incidência de juros e multa.",
"Acompanhar",
),
("BDI", 59): (
"customs_control",
"Revisão de tributo concluída - Tributo mantido",
"Poderá haver incidência de juros e multa.",
"Acompanhar",
),
("BDR", 59): (
"customs_control",
"Revisão de tributo concluída - Tributo mantido",
"Poderá haver incidência de juros e multa.",
"Acompanhar",
),
("BDR", 60): ("shipped", "O objeto encontra-se aguardando prazo para refugo", "", "Acompanhar"),
("BDI", 60): ("shipped", "O objeto encontra-se aguardando prazo para refugo", "", "Acompanhar"),
("BDE", 66): (
"shipped_delayed",
"Área com distribuição sujeita a prazo diferenciado",
"Restrição de entrega domiciliar temporária",
"Acompanhar",
),
("BDI", 66): (
"shipped_delayed",
"Área com distribuição sujeita a prazo diferenciado",
"Restrição de entrega domiciliar temporária",
"Acompanhar",
),
("BDR", 66): (
"shipped_delayed",
"Área com distribuição sujeita a prazo diferenciado",
"Restrição de entrega domiciliar temporária",
"Acompanhar",
),
("BDE", 68): (
"waiting_retrieval",
"Objeto aguardando retirada em armário inteligente",
"Estará disponível por até 3 dias, a partir de hoje",
"Acompanhar",
),
("BDI", 68): (
"waiting_retrieval",
"Objeto aguardando retirada em armário inteligente",
"Estará disponível por até 3 dias, a partir de hoje",
"Acompanhar",
),
("BDR", 68): (
"waiting_retrieval",
"Objeto aguardando retirada em armário inteligente",
"Estará disponível por até 3 dias, a partir de hoje",
"Acompanhar",
),
("BDE", 69): ("shipped_delayed", "Objeto com atraso na entrega", "", "Acompanhar"),
("BDI", 69): ("shipped_delayed", "Objeto com atraso na entrega", "", "Acompanhar"),
("BDR", 69): ("shipped_delayed", "Objeto com atraso na entrega", "", "Acompanhar"),
("BDE", 80): ("lost", "Objeto extraviado", "", "Acionar a CAC dos Correios"),
("BDI", 80): ("lost", "Objeto extraviado", "", "Acionar a CAC dos Correios"),
("BDR", 80): ("lost", "Objeto extraviado", "", "Acionar a CAC dos Correios"),
("CMR", 1): ("shipped", "Conferido", "", "Acompanhar"),
}
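# --- Hedged usage sketch (not part of the original module) -------------------
# The mapping that closes above is keyed by (status type, status code) tuples and
# each value is a 4-tuple: (normalized state, headline, detail, recommended action).
# Its variable name is defined earlier in the file and is not shown here, so this
# helper takes the mapping as an explicit argument rather than assuming a name.
def describe_tracking_event(status_map, status_type, status_code):
    """Return the 4-tuple for a Correios tracking event, with a generic fallback."""
    fallback = ("unknown", "Status desconhecido", "", "Acompanhar")
    return status_map.get((status_type, status_code), fallback)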
ZIP_CODE_MAP = {
"SP": (RangeSet((1000, 20000)), RangeSet((1000, 6000), (8000, 8500))), # all # capital
"RJ": (RangeSet((20000, 29000)), RangeSet((20000, 23800))),
"ES": (RangeSet((29000, 30000)), RangeSet((29000, 29100))),
"MG": (RangeSet((30000, 40000)), RangeSet((30000, 32000))),
"BA": (RangeSet((40000, 49000)), RangeSet((40000, 42600))),
"SE": (RangeSet((49000, 50000)), RangeSet((49000, 49100))),
"PE": (RangeSet((50000, 57000)), RangeSet((50000, 53000))),
"AL": (RangeSet((57000, 58000)), RangeSet((57000, 57100))),
"PB": (RangeSet((58000, 59000)), RangeSet((58000, 58100))),
"RN": (RangeSet((59000, 60000)), RangeSet((59000, 59140))),
"CE": (RangeSet((60000, 64000)), RangeSet((60000, 61600))),
"PI": (RangeSet((64000, 65000)), RangeSet((64000, 64100))),
"MA": (RangeSet((65000, 66000)), RangeSet((65000, 65110))),
"PA": (RangeSet((66000, 68900)), RangeSet((66000, 67000))),
"AP": (RangeSet((68900, 69000)), RangeSet((68900, 68915))),
"AM": (RangeSet((69000, 69300), (69400, 69900)), RangeSet((69000, 69100), (69000, 69100))),
"RR": (RangeSet((69300, 69400)), RangeSet((69300, 69340))),
"AC": (RangeSet((69900, 70000)), RangeSet((69900, 69925))),
"DF": (RangeSet((70000, 72800), (73000, 73700)), RangeSet((70000, 72800), (73000, 73700))),
"GO": (RangeSet((72800, 73000), (73700, 76800)), RangeSet((74000, 74900))),
"TO": (RangeSet((77000, 78000)), RangeSet((77000, 77300))),
"MT": (RangeSet((78000, 78900)), RangeSet((78000, 78110))),
"RO": (RangeSet((76800, 77000), (78900, 79000)), RangeSet((76800, 76850), (78900, 78925))),
"MS": (RangeSet((79000, 80000)), RangeSet((79000, 79130))),
"PR": (RangeSet((80000, 88000)), RangeSet((80000, 83000))),
"SC": (RangeSet((88000, 90000)), RangeSet((88000, 88100))),
"RS": (RangeSet((90000, 100000)), RangeSet((90000, 92000))),
} # type: Dict[str, Tuple[RangeSet, RangeSet]]
ZIP_CODES = RangeSet(*(p[0] for p in ZIP_CODE_MAP.values()))
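# --- Hedged usage sketch (not part of the original module) -------------------
# Look up which state a 5-digit CEP prefix belongs to via ZIP_CODE_MAP. This
# assumes the RangeSet class defined earlier in the file supports membership
# tests with the `in` operator; if it exposes a different API, adapt the check.
def state_for_cep_prefix(cep_prefix):
    """Return (state, is_capital) for a CEP prefix, or (None, False) if unknown."""
    for state, (state_ranges, capital_ranges) in ZIP_CODE_MAP.items():
        if cep_prefix in state_ranges:
            return state, cep_prefix in capital_ranges
    return None, False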
FREIGHT_ERROR_RESPONSES = {
0: "Processamento com sucesso",
-1: "Código de serviço inválido",
-2: "CEP de origem inválido",
-3: "CEP de destino inválido",
-4: "Peso excedido",
-5: "O Valor Declarado não deve exceder R$ 10.000,00",
-6: "Serviço indisponível para o trecho informado",
-7: "O Valor Declarado é obrigatório para este serviço",
-8: "Este serviço não aceita Mão Própria",
-9: "Este serviço não aceita Aviso de Recebimento",
-10: "Precificação indisponível para o trecho informado",
-11: "Para definição do preço deverão ser informados, também, o comprimento, "
"a largura e altura do objeto em centímetros (cm).",
-12: "Comprimento inválido.",
-13: "Largura inválida.",
-14: "Altura inválida.",
-15: "O comprimento não pode ser maior que 105 cm.",
-16: "A largura não pode ser maior que 105 cm.",
-17: "A altura não pode ser maior que 105 cm.",
-18: "A altura não pode ser inferior a 2 cm.",
-20: "A largura não pode ser inferior a 11 cm.",
-22: "O comprimento não pode ser inferior a 16 cm.",
-23: "A soma resultante do comprimento + largura + altura não deve superar a 200 cm.",
-24: "Comprimento inválido.",
-25: "Diâmetro inválido",
-26: "Informe o comprimento.",
-27: "Informe o diâmetro.",
-28: "O comprimento não pode ser maior que 105 cm.",
-29: "O diâmetro não pode ser maior que 91 cm.",
-30: "O comprimento não pode ser inferior a 18 cm.",
-31: "O diâmetro não pode ser inferior a 5 cm.",
-32: "A soma resultante do comprimento + o dobro do diâmetro não deve superar a 200 cm.",
-33: "Sistema temporariamente fora do ar. Favor tentar mais tarde.",
-34: "Código Administrativo ou Senha inválidos.",
-35: "Senha incorreta.",
-36: "Cliente não possui contrato vigente com os Correios.",
-37: "Cliente não possui serviço ativo em seu contrato.",
-38: "Serviço indisponível para este código administrativo.",
-39: "Peso excedido para o formato envelope",
-40: (
"Para definicao do preco deverao ser informados, tambem, o comprimento e a "
"largura e altura do objeto em centimetros (cm)."
),
-41: "O comprimento nao pode ser maior que 60 cm.",
-42: "O comprimento nao pode ser inferior a 16 cm.",
-43: "A soma resultante do comprimento + largura nao deve superar a 120 cm.",
-44: "A largura nao pode ser inferior a 11 cm.",
-45: "A largura nao pode ser maior que 60 cm.",
-888: "Erro ao calcular a tarifa",
6: "Localidade de origem não abrange o serviço informado",
# 7: 'Localidade de destino não abrange o serviço informado',
7: "Serviço indisponível, tente mais tarde", #
8: "Serviço indisponível para o trecho informado",
9: "CEP inicial pertencente a Área de Risco.",
10: "Área com entrega temporariamente sujeita a prazo diferenciado.",
11: "CEP inicial e final pertencentes a Área de Risco",
99: "Outros erros diversos do .Net",
}
FREIGHT_ERROR_INITIAL_ZIPCODE_RESTRICTED = 9
FREIGHT_ERROR_FINAL_ZIPCODE_RESTRICTED = 10
FREIGHT_ERROR_INITIAL_AND_FINAL_ZIPCODE_RESTRICTED = 11
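# --- Hedged usage sketch (not part of the original module) -------------------
# Translate a freight API return code into its message, falling back to a
# generic text for codes missing from the table above.
def freight_error_message(code):
    return FREIGHT_ERROR_RESPONSES.get(code, "Erro desconhecido (código {})".format(code))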
avg_line_length: 39.262614 | max_line_length: 117 | alphanum_fraction: 0.596591
hexsha: e28e4f20768f756e3e2cf3b711294274d47c6a80 | size: 1,873 | ext: py | lang: Python
max_stars_repo_path: examples/examples_test.py | max_stars_repo_name: jarrodmcc/OpenFermion-Cirq | max_stars_repo_head_hexsha: d0755e92e68c6de1b52f5004dc5a75b445f01349 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: examples/examples_test.py | max_issues_repo_name: jarrodmcc/OpenFermion-Cirq | max_issues_repo_head_hexsha: d0755e92e68c6de1b52f5004dc5a75b445f01349 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: examples/examples_test.py | max_forks_repo_name: jarrodmcc/OpenFermion-Cirq | max_forks_repo_head_hexsha: d0755e92e68c6de1b52f5004dc5a75b445f01349 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Dict  # used by the "# type:" comment in the test below
import nbformat
import pytest
def find_examples_jupyter_notebook_paths():
examples_folder = os.path.dirname(__file__)
for filename in os.listdir(examples_folder):
if not filename.endswith('.ipynb'):
continue
yield os.path.join(examples_folder, filename)
@pytest.mark.parametrize('path', find_examples_jupyter_notebook_paths())
def test_can_run_examples_jupyter_notebook(path):
notebook = nbformat.read(path, nbformat.NO_CONVERT)
state = {} # type: Dict[str, Any]
for cell in notebook.cells:
if cell.cell_type == 'code' and not is_matplotlib_cell(cell):
try:
exec(strip_magics_and_shows(cell.source), state)
# coverage: ignore
except:
print('Failed to run {}.'.format(path))
raise
def is_matplotlib_cell(cell: nbformat.NotebookNode):
return "%matplotlib" in cell.source
def strip_magics_and_shows(text: str) -> str:
"""Remove Jupyter magics and pyplot show commands."""
lines = [line for line in text.split('\n')
if not contains_magic_or_show(line)]
return '\n'.join(lines)
def contains_magic_or_show(line: str) -> bool:
return (line.strip().startswith('%') or
'pyplot.show(' in line or
'plt.show(' in line)
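# Hedged addition (not part of the original test module): a minimal check of the
# helpers above, kept as a pytest test so it only runs under the test runner.
def test_strip_magics_and_shows_helper():
    source = "%matplotlib inline\nx = 1\nplt.show()"
    assert strip_magics_and_shows(source) == "x = 1"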
avg_line_length: 32.859649 | max_line_length: 74 | alphanum_fraction: 0.684463
hexsha: 6ca1c3fa0cda06137248c06e3ca744e7e1604d37 | size: 5,984 | ext: py | lang: Python
max_stars_repo_path: tests/test_gbox_ops.py | max_stars_repo_name: opendatacube/odc-geo | max_stars_repo_head_hexsha: 4f9004720d899dff4bf0b208e4fe602790d055d7 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2021-12-22T01:32:24.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-10T07:50:34.000Z
max_issues_repo_path: tests/test_gbox_ops.py | max_issues_repo_name: opendatacube/odc-geo | max_issues_repo_head_hexsha: 4f9004720d899dff4bf0b208e4fe602790d055d7 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 29 | max_issues_repo_issues_event_min_datetime: 2022-01-11T08:48:23.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-29T09:03:42.000Z
max_forks_repo_path: tests/test_gbox_ops.py | max_forks_repo_name: opendatacube/odc-geo | max_forks_repo_head_hexsha: 4f9004720d899dff4bf0b208e4fe602790d055d7 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2022-01-26T23:20:34.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-21T16:54:00.000Z
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from affine import Affine
from odc.geo import CRSMismatchError
from odc.geo import geobox as gbx
from odc.geo import geom as geometry
from odc.geo import wh_
from odc.geo.geobox import GeoBox
# pylint: disable=pointless-statement,too-many-statements
epsg3857 = geometry.CRS("EPSG:3857")
def test_gbox_ops():
s = GeoBox(wh_(1000, 100), Affine(10, 0, 12340, 0, -10, 316770), epsg3857)
assert s.shape == (100, 1000)
d = gbx.flipy(s)
assert d.shape == s.shape
assert d.crs is s.crs
assert d.resolution.yx == (-s.resolution.y, s.resolution.x)
assert d.extent.contains(s.extent)
with pytest.raises(ValueError):
# flipped grid
(s | d)
with pytest.raises(ValueError):
# flipped grid
(s & d)
d = gbx.flipx(s)
assert d.shape == s.shape
assert d.crs is s.crs
assert d.resolution.yx == (s.resolution.y, -s.resolution.x)
assert d.extent.contains(s.extent)
assert gbx.flipy(gbx.flipy(s)).affine == s.affine
assert gbx.flipx(gbx.flipx(s)).affine == s.affine
d = gbx.zoom_out(s, 2)
assert d.shape == (50, 500)
assert d.crs is s.crs
assert d.extent.contains(s.extent)
assert d.resolution.yx == (s.resolution.y * 2, s.resolution.x * 2)
d = gbx.zoom_out(s, 2 * max(s.shape))
assert d.shape == (1, 1)
assert d.crs is s.crs
assert d.extent.contains(s.extent)
d = gbx.zoom_out(s, 1.33719)
assert d.crs is s.crs
assert d.extent.contains(s.extent)
assert all(ds < ss for ds, ss in zip(d.shape, s.shape))
with pytest.raises(ValueError):
# lower resolution grid
(s | d)
with pytest.raises(ValueError):
# lower resolution grid
(s & d)
d = gbx.zoom_to(s, s.shape)
assert d == s
d = gbx.zoom_to(s, (1, 3))
assert d.shape == (1, 3)
assert d.extent == s.extent
d = gbx.zoom_to(s, (10000, 10000))
assert d.shape == (10000, 10000)
assert d.extent == s.extent
d = gbx.pad(s, 1)
assert d.crs is s.crs
assert d.resolution == s.resolution
assert d.extent.contains(s.extent)
assert s.extent.contains(d.extent) is False
assert d[1:-1, 1:-1].affine == s.affine
assert d[1:-1, 1:-1].shape == s.shape
assert d == (s | d)
assert s == (s & d)
d = gbx.pad_wh(s, 10)
assert d == s
d = gbx.pad_wh(s, 100, 8)
assert d.width == s.width
assert d.height % 8 == 0
assert 0 < d.height - s.height < 8
assert d.affine == s.affine
assert d.crs is s.crs
d = gbx.pad_wh(s, 13, 17)
assert d.affine == s.affine
assert d.crs is s.crs
assert d.height % 17 == 0
assert d.width % 13 == 0
assert 0 < d.height - s.height < 17
assert 0 < d.width - s.width < 13
d = gbx.translate_pix(s, 1, 2)
assert d.crs is s.crs
assert d.resolution == s.resolution
assert d.extent != s.extent
assert s[2:3, 1:2].extent == d[:1, :1].extent
d = gbx.translate_pix(s, -10, -2)
assert d.crs is s.crs
assert d.resolution == s.resolution
assert d.extent != s.extent
assert s[:1, :1].extent == d[2:3, 10:11].extent
d = gbx.translate_pix(s, 0.1, 0)
assert d.crs is s.crs
assert d.shape == s.shape
assert d.resolution == s.resolution
assert d.extent != s.extent
assert d.extent.contains(s[:, 1:].extent)
d = gbx.translate_pix(s, 0, -0.5)
assert d.crs is s.crs
assert d.shape == s.shape
assert d.resolution == s.resolution
assert d.extent != s.extent
assert s.extent.contains(d[1:, :].extent)
d = gbx.affine_transform_pix(s, Affine(1, 0, 0, 0, 1, 0))
assert d.crs is s.crs
assert d.shape == s.shape
assert d.resolution == s.resolution
assert d.extent == s.extent
d = gbx.rotate(s, 180)
assert d.crs is s.crs
assert d.shape == s.shape
assert d.extent != s.extent
np.testing.assert_almost_equal(d.extent.area, s.extent.area, 5)
assert s[49:52, 499:502].extent.contains(
d[50:51, 500:501].extent
), "Check that center pixel hasn't moved"
def test_gbox_tiles():
A = Affine.identity()
H, W = (300, 200)
h, w = (10, 20)
gbox = GeoBox(wh_(W, H), A, epsg3857)
tt = gbx.GeoboxTiles(gbox, (h, w))
assert tt.shape == (300 / 10, 200 / 20)
assert tt.base is gbox
assert tt[0, 0] == gbox[0:h, 0:w]
assert tt[0, 1] == gbox[0:h, w : w + w]
assert tt[0, 0] is tt[0, 0] # Should cache exact same object
assert tt[4, 1].shape == (h, w)
H, W = (11, 22)
h, w = (10, 9)
gbox = GeoBox(wh_(W, H), A, epsg3857)
tt = gbx.GeoboxTiles(gbox, (h, w))
assert tt.shape == (2, 3)
assert tt[1, 2] == gbox[10:11, 18:22]
# check .roi
assert tt.base[tt.roi[1, 2]] == tt[1, 2]
for idx in [tt.shape, (-1, 0), (0, -1), (-33, 1)]:
with pytest.raises(IndexError):
tt[idx]
with pytest.raises(IndexError):
tt.chunk_shape(idx)
cc = np.zeros(tt.shape, dtype="int32")
for idx in tt.tiles(gbox.extent):
cc[idx] += 1
np.testing.assert_array_equal(cc, np.ones(tt.shape))
assert list(tt.tiles(gbox[:h, :w].extent)) == [(0, 0)]
assert list(tt.tiles(gbox[:h, :w].extent.to_crs("epsg:4326"))) == [(0, 0)]
(H, W) = (11, 22)
(h, w) = (10, 20)
tt = gbx.GeoboxTiles(GeoBox(wh_(W, H), A, epsg3857), (h, w))
assert tt.chunk_shape((0, 0)) == (h, w)
assert tt.chunk_shape((0, 1)) == (h, 2)
assert tt.chunk_shape((1, 1)) == (1, 2)
assert tt.chunk_shape((1, 0)) == (1, w)
    # check that overhang gets clamped properly
assert tt.range_from_bbox(gbox.pad(2).boundingbox) == (
range(0, tt.shape[0]),
range(0, tt.shape[1]),
)
with pytest.raises(CRSMismatchError):
_ = tt.range_from_bbox(gbox.geographic_extent.boundingbox)
avg_line_length: 29.333333 | max_line_length: 92 | alphanum_fraction: 0.598429
hexsha: b6b4ac1a9f41d40138e59a1f7d9e7d82c2d0be00 | size: 11,431 | ext: py | lang: Python
max_stars_repo_path: mriqc/interfaces/anatomical.py | max_stars_repo_name: effigies/mriqc | max_stars_repo_head_hexsha: de60ff0f65e4fe0e315143fe3b75ecd940beb2b1 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: mriqc/interfaces/anatomical.py | max_issues_repo_name: effigies/mriqc | max_issues_repo_head_hexsha: de60ff0f65e4fe0e315143fe3b75ecd940beb2b1 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: mriqc/interfaces/anatomical.py | max_forks_repo_name: effigies/mriqc | max_forks_repo_head_hexsha: de60ff0f65e4fe0e315143fe3b75ecd940beb2b1 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# @Author: oesteban
# @Date: 2016-01-05 11:29:40
# @Email: code@oscaresteban.es
# @Last modified by: oesteban
# @Last Modified time: 2016-11-21 18:59:13
""" Nipype interfaces to support anatomical workflow """
from __future__ import print_function, division, absolute_import, unicode_literals
import os.path as op
import numpy as np
import nibabel as nb
import scipy.ndimage as nd
from builtins import zip
from nipype import logging
from nipype.interfaces.base import (traits, TraitedSpec, File,
InputMultiPath, BaseInterfaceInputSpec)
from mriqc.utils.misc import _flatten_dict
from mriqc.interfaces.base import MRIQCBaseInterface
from mriqc.qc.anatomical import (snr, snr_dietrich, cnr, fber, efc, art_qi1,
art_qi2, volume_fraction, rpve, summary_stats,
cjv, wm2max)
IFLOGGER = logging.getLogger('interface')
class StructuralQCInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='file to be plotted')
in_noinu = File(exists=True, mandatory=True, desc='image after INU correction')
in_segm = File(exists=True, mandatory=True, desc='segmentation file from FSL FAST')
in_bias = File(exists=True, mandatory=True, desc='bias file')
head_msk = File(exists=True, mandatory=True, desc='head mask')
air_msk = File(exists=True, mandatory=True, desc='air mask')
artifact_msk = File(exists=True, mandatory=True, desc='air mask')
in_pvms = InputMultiPath(File(exists=True), mandatory=True,
desc='partial volume maps from FSL FAST')
in_tpms = InputMultiPath(File(), desc='tissue probability maps from FSL FAST')
mni_tpms = InputMultiPath(File(), desc='tissue probability maps from FSL FAST')
class StructuralQCOutputSpec(TraitedSpec):
summary = traits.Dict(desc='summary statistics per tissue')
icvs = traits.Dict(desc='intracranial volume (ICV) fractions')
rpve = traits.Dict(desc='partial volume fractions')
size = traits.Dict(desc='image sizes')
spacing = traits.Dict(desc='image sizes')
inu = traits.Dict(desc='summary statistics of the bias field')
snr = traits.Dict
snrd = traits.Dict
cnr = traits.Float
fber = traits.Float
efc = traits.Float
qi_1 = traits.Float
wm2max = traits.Float
cjv = traits.Float
out_qc = traits.Dict(desc='output flattened dictionary with all measures')
out_noisefit = File(exists=True, desc='plot of background noise and chi fitting')
tpm_overlap = traits.Dict
class StructuralQC(MRIQCBaseInterface):
"""
Computes anatomical :abbr:`QC (Quality Control)` measures on the
structural image given as input
"""
input_spec = StructuralQCInputSpec
output_spec = StructuralQCOutputSpec
def _run_interface(self, runtime): # pylint: disable=R0914
imnii = nb.load(self.inputs.in_file)
imdata = np.nan_to_num(imnii.get_data())
erode = np.all(np.array(imnii.get_header().get_zooms()[:3],
dtype=np.float32) < 1.2)
# Cast to float32
imdata = imdata.astype(np.float32)
# Remove negative values
imdata[imdata < 0] = 0
# Load image corrected for INU
inudata = np.nan_to_num(nb.load(self.inputs.in_noinu).get_data())
inudata[inudata < 0] = 0
segnii = nb.load(self.inputs.in_segm)
segdata = segnii.get_data().astype(np.uint8)
airdata = nb.load(self.inputs.air_msk).get_data().astype(np.uint8)
artdata = nb.load(self.inputs.artifact_msk).get_data().astype(np.uint8)
headdata = nb.load(self.inputs.head_msk).get_data().astype(np.uint8)
# SNR
snrvals = []
self._results['snr'] = {}
for tlabel in ['csf', 'wm', 'gm']:
snrvals.append(snr(inudata, segdata, fglabel=tlabel, erode=erode))
self._results['snr'][tlabel] = snrvals[-1]
self._results['snr']['total'] = float(np.mean(snrvals))
snrvals = []
self._results['snrd'] = {
tlabel: snr_dietrich(inudata, segdata, airdata, fglabel=tlabel, erode=erode)
for tlabel in ['csf', 'wm', 'gm']}
self._results['snrd']['total'] = float(
np.mean([val for _, val in list(self._results['snrd'].items())]))
# CNR
self._results['cnr'] = cnr(inudata, segdata)
# FBER
self._results['fber'] = fber(inudata, headdata)
# EFC
self._results['efc'] = efc(inudata)
# M2WM
self._results['wm2max'] = wm2max(imdata, segdata)
# Artifacts
self._results['qi_1'] = art_qi1(airdata, artdata)
# CJV
self._results['cjv'] = cjv(inudata, seg=segdata)
pvmdata = []
for fname in self.inputs.in_pvms:
pvmdata.append(nb.load(fname).get_data().astype(np.float32))
# ICVs
self._results['icvs'] = volume_fraction(pvmdata)
# RPVE
self._results['rpve'] = rpve(pvmdata, segdata)
# Summary stats
self._results['summary'] = summary_stats(imdata, pvmdata, airdata)
# Image specs
self._results['size'] = {'x': int(imdata.shape[0]),
'y': int(imdata.shape[1]),
'z': int(imdata.shape[2])}
self._results['spacing'] = {
i: float(v) for i, v in zip(
['x', 'y', 'z'], imnii.get_header().get_zooms()[:3])}
try:
self._results['size']['t'] = int(imdata.shape[3])
except IndexError:
pass
try:
self._results['spacing']['tr'] = float(imnii.get_header().get_zooms()[3])
except IndexError:
pass
# Bias
bias = nb.load(self.inputs.in_bias).get_data()[segdata > 0]
self._results['inu'] = {
'range': float(np.abs(np.percentile(bias, 95.) - np.percentile(bias, 5.))),
'med': float(np.median(bias))} #pylint: disable=E1101
mni_tpms = [nb.load(tpm).get_data() for tpm in self.inputs.mni_tpms]
in_tpms = [nb.load(tpm).get_data() for tpm in self.inputs.in_pvms]
overlap = fuzzy_jaccard(in_tpms, mni_tpms)
self._results['tpm_overlap'] = {
'csf': overlap[0],
'gm': overlap[1],
'wm': overlap[2]
}
# Flatten the dictionary
self._results['out_qc'] = _flatten_dict(self._results)
return runtime
class ArtifactMaskInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='File to be plotted')
head_mask = File(exists=True, mandatory=True, desc='head mask')
nasion_post_mask = File(exists=True, mandatory=True,
desc='nasion to posterior of cerebellum mask')
class ArtifactMaskOutputSpec(TraitedSpec):
out_art_msk = File(exists=True, desc='output artifacts mask')
out_air_msk = File(exists=True, desc='output artifacts mask, without artifacts')
class ArtifactMask(MRIQCBaseInterface):
"""
Computes the artifact mask using the method described in [Mortamet2009]_.
"""
input_spec = ArtifactMaskInputSpec
output_spec = ArtifactMaskOutputSpec
def _run_interface(self, runtime):
imnii = nb.load(self.inputs.in_file)
imdata = np.nan_to_num(imnii.get_data().astype(np.float32))
# Remove negative values
imdata[imdata < 0] = 0
hmdata = nb.load(self.inputs.head_mask).get_data()
npdata = nb.load(self.inputs.nasion_post_mask).get_data()
# Invert head mask
airdata = np.ones_like(hmdata, dtype=np.uint8)
airdata[hmdata == 1] = 0
# Calculate distance to border
dist = nd.morphology.distance_transform_edt(airdata)
# Apply nasion-to-posterior mask
airdata[npdata == 1] = 0
dist[npdata == 1] = 0
dist /= dist.max()
# Run the artifact detection
qi1_img = artifact_mask(imdata, airdata, dist)
fname, ext = op.splitext(op.basename(self.inputs.in_file))
if ext == '.gz':
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
self._results['out_art_msk'] = op.abspath('{}_artifacts{}'.format(fname, ext))
self._results['out_air_msk'] = op.abspath('{}_noart-air{}'.format(fname, ext))
hdr = imnii.get_header().copy()
hdr.set_data_dtype(np.uint8)
nb.Nifti1Image(qi1_img, imnii.get_affine(), hdr).to_filename(
self._results['out_art_msk'])
airdata[qi1_img > 0] = 0
nb.Nifti1Image(airdata, imnii.get_affine(), hdr).to_filename(
self._results['out_air_msk'])
return runtime
class ComputeQI2InputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='File to be plotted')
air_msk = File(exists=True, mandatory=True, desc='air (without artifacts) mask')
erodemsk = traits.Bool(True, usedefault=True, desc='erode mask')
ncoils = traits.Int(12, usedefault=True, desc='number of coils')
class ComputeQI2OutputSpec(TraitedSpec):
qi2 = traits.Float(desc='computed QI2 value')
out_file = File(desc='output plot: noise fit')
class ComputeQI2(MRIQCBaseInterface):
"""
Computes the artifact mask using the method described in [Mortamet2009]_.
"""
input_spec = ComputeQI2InputSpec
output_spec = ComputeQI2OutputSpec
def _run_interface(self, runtime):
imdata = nb.load(self.inputs.in_file).get_data()
airdata = nb.load(self.inputs.air_msk).get_data()
qi2, out_file = art_qi2(imdata, airdata, ncoils=self.inputs.ncoils,
erodemask=self.inputs.erodemsk)
self._results['qi2'] = qi2
self._results['out_file'] = out_file
return runtime
def artifact_mask(imdata, airdata, distance, zscore=10.):
"""Computes a mask of artifacts found in the air region"""
from statsmodels.robust.scale import mad
if not np.issubdtype(airdata.dtype, np.integer):
airdata[airdata < .95] = 0
airdata[airdata > 0.] = 1
bg_img = imdata * airdata
if np.sum((bg_img > 0).astype(np.uint8)) < 100:
return np.zeros_like(airdata)
# Find the background threshold (the most frequently occurring value
# excluding 0)
bg_location = np.median(bg_img[bg_img > 0])
bg_spread = mad(bg_img[bg_img > 0])
bg_img[bg_img > 0] -= bg_location
bg_img[bg_img > 0] /= bg_spread
# Apply this threshold to the background voxels to identify voxels
# contributing artifacts.
qi1_img = np.zeros_like(bg_img)
qi1_img[bg_img > zscore] = 1
qi1_img[distance < .10] = 0
# Create a structural element to be used in an opening operation.
struc = nd.generate_binary_structure(3, 1)
qi1_img = nd.binary_opening(qi1_img, struc).astype(np.uint8)
qi1_img[airdata <= 0] = 0
return qi1_img
def fuzzy_jaccard(in_tpms, in_mni_tpms):
overlaps = []
for tpm, mni_tpm in zip(in_tpms, in_mni_tpms):
tpm = tpm.reshape(-1)
mni_tpm = mni_tpm.reshape(-1)
num = np.min([tpm, mni_tpm], axis=0).sum()
den = np.max([tpm, mni_tpm], axis=0).sum()
overlaps.append(float(num/den))
return overlaps
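# Hedged usage sketch (not part of the original module): fuzzy_jaccard expects two
# equal-length lists of probability maps (numpy arrays) and returns one fuzzy
# Jaccard overlap per pair.
def _fuzzy_jaccard_example():
    tpm = [np.array([[0.2, 0.8], [0.5, 0.0]])]
    mni = [np.array([[0.1, 0.9], [0.5, 0.5]])]
    # elementwise min-sum / max-sum = (0.1 + 0.8 + 0.5 + 0.0) / (0.2 + 0.9 + 0.5 + 0.5)
    return fuzzy_jaccard(tpm, mni)  # -> [0.666...]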
avg_line_length: 36.288889 | max_line_length: 88 | alphanum_fraction: 0.633278
hexsha: 428a877bdfde57f80493918690f6b10cb02f83ec | size: 631 | ext: py | lang: Python
max_stars_repo_path: sdk/synapse/azure-synapse-managedprivateendpoints/azure/synapse/managedprivateendpoints/__init__.py | max_stars_repo_name: rsdoherty/azure-sdk-for-python | max_stars_repo_head_hexsha: 6bba5326677468e6660845a703686327178bb7b1 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2,728 | max_stars_repo_stars_event_min_datetime: 2015-01-09T10:19:32.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T14:50:33.000Z
max_issues_repo_path: sdk/synapse/azure-synapse-managedprivateendpoints/azure/synapse/managedprivateendpoints/__init__.py | max_issues_repo_name: rsdoherty/azure-sdk-for-python | max_issues_repo_head_hexsha: 6bba5326677468e6660845a703686327178bb7b1 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 17,773 | max_issues_repo_issues_event_min_datetime: 2015-01-05T15:57:17.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T23:50:25.000Z
max_forks_repo_path: sdk/synapse/azure-synapse-managedprivateendpoints/azure/synapse/managedprivateendpoints/__init__.py | max_forks_repo_name: rsdoherty/azure-sdk-for-python | max_forks_repo_head_hexsha: 6bba5326677468e6660845a703686327178bb7b1 | max_forks_count: 1,916 | max_forks_repo_licenses: ["MIT"] | max_forks_repo_forks_event_min_datetime: 2015-01-19T05:05:41.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-31T19:36:44.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._vnet_client import VnetClient
__all__ = ['VnetClient']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
avg_line_length: 37.117647 | max_line_length: 94 | alphanum_fraction: 0.575277
hexsha: 77a6564d33a2fbfa1b100645e105c1be7e5c3eee | size: 1,015 | ext: py | lang: Python
max_stars_repo_path: testapp/wagtail_wordpress_importer/migrations/0011_auto_20210107_1334.py | max_stars_repo_name: nickmoreton/wagtail_wordpress_importer | max_stars_repo_head_hexsha: fbe6b60ae624edac3f42a62ce30af4a0c548b4ed | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: testapp/wagtail_wordpress_importer/migrations/0011_auto_20210107_1334.py | max_issues_repo_name: nickmoreton/wagtail_wordpress_importer | max_issues_repo_head_hexsha: fbe6b60ae624edac3f42a62ce30af4a0c548b4ed | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: testapp/wagtail_wordpress_importer/migrations/0011_auto_20210107_1334.py | max_forks_repo_name: nickmoreton/wagtail_wordpress_importer | max_forks_repo_head_hexsha: fbe6b60ae624edac3f42a62ce30af4a0c548b4ed | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Generated by Django 3.1.4 on 2021-01-07 13:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtail_wordpress_importer', '0010_historicalimportmedia_importmedia'),
]
operations = [
migrations.AlterModelOptions(
name='historicalimportmedia',
options={'get_latest_by': 'history_date', 'ordering': ('-history_date', '-history_id'), 'verbose_name': 'historical Media'},
),
migrations.AlterModelOptions(
name='importmedia',
options={'verbose_name': 'Media', 'verbose_name_plural': 'Media'},
),
migrations.AlterField(
model_name='historicalimportmedia',
name='post',
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AlterField(
model_name='importmedia',
name='post',
field=models.PositiveIntegerField(blank=True, default=0),
),
]
avg_line_length: 31.71875 | max_line_length: 136 | alphanum_fraction: 0.612808
hexsha: e10ffc4170013d48ebcbb853eadb1ac7849efc85 | size: 678 | ext: py | lang: Python
max_stars_repo_path: blogtools/tests/test_blog/admin.py | max_stars_repo_name: ixc/glamkit-blogtools | max_stars_repo_head_hexsha: a3024d983eabafeba5df789eacbcc389e4181866 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: blogtools/tests/test_blog/admin.py | max_issues_repo_name: ixc/glamkit-blogtools | max_issues_repo_head_hexsha: a3024d983eabafeba5df789eacbcc389e4181866 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: blogtools/tests/test_blog/admin.py | max_forks_repo_name: ixc/glamkit-blogtools | max_forks_repo_head_hexsha: a3024d983eabafeba5df789eacbcc389e4181866 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from django.contrib import admin
from feincms.admin.item_editor import FEINCMS_CONTENT_FIELDSET
from feincmstools.admin import FeinCMSDocumentAdmin
from blogtools.admin import CategoryEntryModelAdmin, CategoryModelAdmin
from .models import Entry, Category
class EntryAdmin(CategoryEntryModelAdmin, FeinCMSDocumentAdmin):
list_filter = list(CategoryEntryModelAdmin.list_filter) + ['category']
fieldsets = tuple(list(CategoryEntryModelAdmin.fieldsets) + [
('Appearance in listings', {
'fields': ('summary',),
}),
FEINCMS_CONTENT_FIELDSET,
])
admin.site.register(Category, CategoryModelAdmin)
admin.site.register(Entry, EntryAdmin)
avg_line_length: 39.882353 | max_line_length: 74 | alphanum_fraction: 0.771386
hexsha: dc4e0ccf23048896ae4691976aff93ec0429ddab | size: 2,354 | ext: py | lang: Python
max_stars_repo_path: test/test_scs_rand.py | max_stars_repo_name: SteveDiamond/scs-python | max_stars_repo_head_hexsha: dfacf32d6d92fb4801f4ebc4eed5023a8afe5604 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 33 | max_stars_repo_stars_event_min_datetime: 2018-02-03T00:10:09.000Z | max_stars_repo_stars_event_max_datetime: 2022-01-01T20:34:25.000Z
max_issues_repo_path: test/test_scs_rand.py | max_issues_repo_name: SteveDiamond/scs-python | max_issues_repo_head_hexsha: dfacf32d6d92fb4801f4ebc4eed5023a8afe5604 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 55 | max_issues_repo_issues_event_min_datetime: 2018-03-12T22:21:48.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-18T14:39:07.000Z
max_forks_repo_path: test/test_scs_rand.py | max_forks_repo_name: SteveDiamond/scs-python | max_forks_repo_head_hexsha: dfacf32d6d92fb4801f4ebc4eed5023a8afe5604 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 24 | max_forks_repo_forks_event_min_datetime: 2018-04-18T03:29:22.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-01T13:30:36.000Z
from __future__ import print_function
import platform
## import utilities to generate random cone probs:
import sys
import gen_random_cone_prob as tools
def import_error(msg):
print()
print("## IMPORT ERROR:" + msg)
print()
try:
import pytest
except ImportError:
import_error("Please install pytest to run tests.")
raise
try:
import scs
except ImportError:
import_error("You must install the scs module before running tests.")
raise
try:
import numpy as np
from numpy.testing import assert_almost_equal
except ImportError:
import_error("Please install numpy.")
raise
try:
import scipy.sparse as sp
except ImportError:
import_error("Please install scipy.")
raise
def assert_(str1, str2):
if str1 != str2:
print("assert failure: %s != %s" % (str1, str2))
assert str1 == str2
def check_infeasible(sol):
assert_(sol['info']['status'], 'infeasible')
def check_unbounded(sol):
assert_(sol['info']['status'], 'unbounded')
np.random.seed(0)
num_feas = 50
num_unb = 10
num_infeas = 10
opts = {
'max_iters': 100000,
'eps_abs': 1e-5,
'eps_infeas': 1e-5,
}
K = {
"f": 10,
"l": 25,
"q": [5, 10, 0, 1],
"s": [2, 1, 2, 0, 1],
"ep": 0,
"ed": 0,
"p": [0.25, -0.75, 0.33, -0.33, 0.2],
}
m = tools.get_scs_cone_dims(K)
@pytest.mark.parametrize("use_indirect", [False, True])
def test_feasible(use_indirect):
for i in range(num_feas):
data, p_star = tools.gen_feasible(K, n=m // 3, density=0.1)
sol = scs.solve(data, K, use_indirect=use_indirect, **opts)
assert_almost_equal(np.dot(data["c"], sol["x"]), p_star, decimal=2)
assert_almost_equal(np.dot(-data["b"], sol["y"]), p_star, decimal=2)
@pytest.mark.parametrize("use_indirect", [False, True])
def test_infeasible(use_indirect):
for i in range(num_infeas):
data = tools.gen_infeasible(K, n=m // 2)
sol = scs.solve(data, K, use_indirect=use_indirect, **opts)
check_infeasible(sol)
# TODO: indirect solver has trouble in this test, so disable for now
@pytest.mark.parametrize("use_indirect", [False])
def test_unbounded(use_indirect):
for i in range(num_unb):
data = tools.gen_unbounded(K, n=m // 2)
sol = scs.solve(data, K, use_indirect=use_indirect, **opts)
check_unbounded(sol)
avg_line_length: 22.854369 | max_line_length: 76 | alphanum_fraction: 0.649958
hexsha: 5a5a8fa5f33e6c6d43bf63b5090e6b8fc9bf4553 | size: 8,435 | ext: py | lang: Python
max_stars_repo_path: Grand_Command00/mainMenu.py | max_stars_repo_name: CyborgVillager/Grand-Command | max_stars_repo_head_hexsha: 30800b8b512d26b7785d7bb7665e5f518a7cdf3f | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Grand_Command00/mainMenu.py | max_issues_repo_name: CyborgVillager/Grand-Command | max_issues_repo_head_hexsha: 30800b8b512d26b7785d7bb7665e5f518a7cdf3f | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Grand_Command00/mainMenu.py | max_forks_repo_name: CyborgVillager/Grand-Command | max_forks_repo_head_hexsha: 30800b8b512d26b7785d7bb7665e5f518a7cdf3f | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from gc_source_modules import *
import sys  # explicit import for sys.exit() in the event loop below; the star import above may not re-export it
# Load the images
def load_images(path_to_directory, height, width):
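    """Walk path_to_directory, load every .png with pygame, scale each image to
    int(640 / width) x int(640 / height) pixels, and return a dict keyed by the
    filename without its extension."""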
import os
import pygame
images = {}
for dirpath, dirnames, filenames in os.walk(path_to_directory):
for name in filenames:
if name.endswith('.png'):
key = name[:-4]
img = pygame.image.load(os.path.join(dirpath, name)).convert()
img = pygame.transform.scale(img, (int(640 / width), int(640 / height)))
images[key] = img
return images
# Generates a board using a height and a width
def gen_Board(board, height, width):
import random
for j in range(height):
for i in range(width):
percent = random.randint(1, 100)
if percent <= 50:
board[j][i] = "Grass"
else:
if percent <= 60:
board[j][i] = "Water"
elif percent <= 75:
board[j][i] = "Forest Lv1"
else:
board[j][i] = "Quarry Lv1"
return board
# Main menu looping background: a richer tile mix used only for the animated menu backdrop.
# Renamed from gen_Board so it no longer shadows the simpler generator above.
def gen_menu_board(board, height, width):
import random
for j in range(height):
for i in range(width):
percent = random.randint(1, 200)
if percent <= 20:
board[j][i] = "Grass"
else:
                if percent <= 55:
                    board[j][i] = "barracks"
                elif percent <= 60:  # was a plain `if`, which immediately overwrote "barracks" with "Water"
                    board[j][i] = "Water"
elif percent <= 105:
board[j][i] = "Forest Lv1"
elif percent <= 115:
board[j][i] = "Forest Lv3"
elif percent <= 135:
board[j][i] = "town_01"
elif percent <= 140:
board[j][i] = "town_02"
elif percent <= 143:
board[j][i] = "town_03"
elif percent <= 155:
board[j][i] = "town_04"
elif percent <= 156:
board[j][i] = "City_00"
elif percent <= 159:
board[j][i] = "Quarry Lv2"
elif percent <= 162:
board[j][i] = "Quarry Lv3"
elif percent <= 165:
board[j][i] = "Factory"
elif percent <= 170:
board[j][i] = "Solar_Power"
elif percent <= 175:
board[j][i] = "Super_Factory"
elif percent <= 180:
board[j][i] = "FishingBoat"
else:
board[j][i] = "Dam"
return board
# main Menu
def HomeScreen(pygame, gameDisplay, Fonts, clock, MusicPaused):
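    """Run the main-menu loop: draws a scrolling, procedurally generated tile
    background and handles the New Game and Options buttons (the Options screen
    currently only toggles the background music) until the user quits or starts
    a game."""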
global AnimationStage, Count, Images
import main
run = True
screen = "main"
height = 20
width = 20
Images = []
Images = load_images("Images", 8, 8)
    MenuBoard = gen_menu_board([[0] * height for _ in range(width)], height, width)
AnimationStage = {"Water": [1, 0.5], "FishingBoat": [1, 0.5], "Dam": [1, 0.5]}
x = 0
y = 0
while run:
gameDisplay.fill((0, 100, 255))
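        # Per-frame tally of selected tile types; passed along to main.draw on every call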
Count = {"Water": 0, "FishingBoat": 0, "Dam": 0, "Forest Lv4": 0, "Quarry Lv4": 0, "Super Factory": 0}
for j in range(height):
for i in range(width):
if MenuBoard[j][i] == "Water" or MenuBoard[j][i] == "FishingBoat" or MenuBoard[j][i] == "Dam":
Count["Water"] += 1
if MenuBoard[j][i] == "FishingBoat":
Count["FishingBoat"] += 1
if MenuBoard[j][i] == "Dam":
Count["Dam"] += 1
if MenuBoard[j][i] == "Forest Lv4":
Count["Forest Lv4"] += 1
if MenuBoard[j][i] == "Quarry Lv4":
Count["Quarry Lv4"] += 1
if MenuBoard[j][i] == "Super Factory":
Count["Super Factory"] += 1
        # Draw all the tiles, plus extra offset copies so the scrolling background loops seamlessly
for j in range(height):
for i in range(width):
                main.draw(i * 80 + x, j * 80 + y, "Tile", MenuBoard[j][i], 8, 8, Images, AnimationStage, Count)
main.draw(i * 80 + x + 1600, j * 80 + y, "Tile", MenuBoard[j][i], 8, 8, Images, AnimationStage, Count)
main.draw(i * 80 + x, j * 80 + y + 1600, "Tile", MenuBoard[j][i], 8, 8, Images, AnimationStage, Count)
main.draw(i * 80 + x + 1600, j * 80 + y + 1600, "Tile", MenuBoard[j][i], 8, 8, Images, AnimationStage,
Count)
        # Scroll the tile offsets each frame; wrap once a full board height (1600 px) has passed
x -= 2
y -= 2
if y < -1600:
y = 0
x = 0
x = 0
pos = pygame.mouse.get_pos()
# Event checking mainly for clicking on the buttons
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
if 400 <= pos[0] <= 600 and 600 <= pos[1] <= 700 and screen == "main":
main.game_loop(8, 8, 0, False)
if 825 <= pos[0] <= 925 and 800 <= pos[1] <= 900 and screen == "main":
screen = "Options"
if 775 <= pos[0] <= 975 and pos[1] >= 675 and pos[1] <= 775 and screen == "Options":
screen = "main"
if 175 <= pos[0] <= 375 and 600 <= pos[1] <= 700 and screen == "main":
main.game_loop(8, 8, 0, True)
# Give user option to mute the background music
if pos[0] >= 200 and pos[0] <= 400 and pos[1] >= 50 and pos[1] <= 150 and screen == "Options":
if MusicPaused == False:
MusicPaused = True
pygame.mixer.music.pause()
else:
MusicPaused = False
pygame.mixer.music.unpause()
# Shows main screen text and buttons
if screen == "main":
text_surface, rect = Fonts[2].render("Grand Command", (242, 43, 35))
gameDisplay.blit(text_surface, (220, 50))
            # Darken the New Game button while the mouse hovers over it
if pos[0] >= 400 and pos[0] <= 600 and pos[1] >= 600 and pos[1] <= 700:
pygame.draw.rect(gameDisplay, (150, 0, 0), (400, 600, 200, 100), 0)
else:
pygame.draw.rect(gameDisplay, (255, 0, 0), (400, 600, 200, 100), 0)
text_surface, rect = Fonts[1].render(("New Game"), (0, 0, 0))
gameDisplay.blit(text_surface, (420, 630))
            # Options button: opens the options screen (currently just the music mute toggle)
            if 825 <= pos[0] <= 925 and 850 <= pos[1] <= 900:
                # hovered: darker button colour
                pygame.draw.rect(gameDisplay, (150, 0, 0), (840, 800, 200, 100), 0)
            else:
                # not hovered: default button colour
                pygame.draw.rect(gameDisplay, (255, 0, 0), (840, 800, 200, 100), 0)
text_surface, rect = Fonts[1].render("Options", (0, 0, 0))
gameDisplay.blit(text_surface, (880, 830))
# Shows the options menu
if screen == "Options":
if pos[0] >= 775 and pos[0] <= 975 and pos[1] >= 675 and pos[1] <= 775:
pygame.draw.rect(gameDisplay, (150, 0, 0), (775, 675, 200, 100), 0)
else:
pygame.draw.rect(gameDisplay, (255, 0, 0), (775, 675, 200, 100), 0)
text_surface, rect = Fonts[1].render("Back", (0, 0, 0))
gameDisplay.blit(text_surface, (835, 705))
if 200 <= pos[0] <= 400 and 50 <= pos[1] <= 150:
pygame.draw.rect(gameDisplay, (150, 0, 0), (200, 50, 200, 100), 0)
else:
pygame.draw.rect(gameDisplay, (255, 0, 0), (200, 50, 200, 100), 0)
if MusicPaused == False:
text_surface, rect = Fonts[1].render("Mute Music", (0, 0, 0))
gameDisplay.blit(text_surface, (210, 80))
else:
text_surface, rect = Fonts[0].render("Unmute Music", (0, 0, 0))
gameDisplay.blit(text_surface, (210, 86))
pygame.display.flip()
clock.tick(120)
| 40.552885
| 118
| 0.470658
|
905472b4b0415f551726ef2eeca56117b9c6218e
| 4,671
|
py
|
Python
|
MADDPG/train.py
|
holestine/Reinforcement-Learning
|
538c3a9089294890d320c72696f4440a79a2415a
|
[
"MIT"
] | 1
|
2018-11-07T21:32:42.000Z
|
2018-11-07T21:32:42.000Z
|
MADDPG/train.py
|
holestine/Reinforcement-Learning
|
538c3a9089294890d320c72696f4440a79a2415a
|
[
"MIT"
] | null | null | null |
MADDPG/train.py
|
holestine/Reinforcement-Learning
|
538c3a9089294890d320c72696f4440a79a2415a
|
[
"MIT"
] | null | null | null |
from unityagents import UnityEnvironment
import numpy as np
import torch
import matplotlib.pyplot as plt
from ddpg_agent import Agent
from buffer import ReplayBuffer
BUFFER_SIZE = int(1e6) # Replay buffer size
BATCH_SIZE = 1024 # Minibatch size
# Get the Unity environment for vector observations
env = UnityEnvironment(file_name="Tennis_Windows_x86_64/Tennis.exe")
# Get the Unity brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# Reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# Number of agents in the environment
num_agents = len(env_info.agents)
print('\nNumber of agents:', num_agents)
# Number of actions
action_size = brain.vector_action_space_size
print('\nNumber of actions:', action_size)
# State properties
state = env_info.vector_observations
state_size = len(state[0])
print('\nStates have length:', state_size)
print('\nStates look like:', state[0], '\n')
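# Checkpoint filenames for each agent's actor and critic networks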
actors, critics = ['actor0.pth', 'actor1.pth'], ['critic0.pth', 'critic1.pth']
def maddpg(n_episodes=50000, max_t=500, print_every=100):
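    """Train both DDPG agents on the Tennis environment.

    Checkpoints each agent's actor/critic weights whenever the latest episode
    matches the best score so far, switches to a second training phase (fresh
    replay buffers, no exploration noise) once the best score exceeds 1, and
    stops early when the trailing 100-episode average exceeds 1. Returns the
    list of per-episode max scores (max over the two agents)."""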
training_phase = 1
scores = []
add_noise=True
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=True)[brain_name]
state = env_info.vector_observations
        score = [0] * num_agents  # Initialize per-agent episode scores
for t in range(max_t):
action = []
for i in range(num_agents):
                action.append(agent[i].act(np.reshape(state[i], (1, state_size)), add_noise=add_noise)) # Get noisy action
env_info = env.step(np.reshape(action, (action_size*num_agents, 1)))[brain_name] # Perform a step in the environment
next_state = env_info.vector_observations # Get the new state (for each agent)
rewards = env_info.rewards # Get the reward (for each agent)
for i in range(num_agents):
memory[i].add(state[i], action[i], rewards[i], next_state[i], env_info.local_done[i]) # Save to replay buffer
agent[i].step(memory[i]) # Perform a step in the neural network
score[i] += rewards[i] # Update the score (for each agent)
state = next_state # Update state
if any(env_info.local_done): # Break if episode complete
break
scores.append(np.max(score))
# Save weights when score improves
        try:
            if scores[-1] == np.max(scores):
                torch.save(agent[0].actor_target.state_dict(), actors[0])
                torch.save(agent[0].critic_target.state_dict(), critics[0])
                torch.save(agent[1].actor_target.state_dict(), actors[1])
                torch.save(agent[1].critic_target.state_dict(), critics[1])
            if training_phase == 1 and np.max(scores) > 1:
                training_phase = 2
                for i in range(num_agents):
                    memory[i] = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, i)
                    agent[i].enable_major_update(False)
                add_noise = False
        except Exception:  # avoid a bare except so Ctrl-C still interrupts training
            print("Failed to save weights on episode {}".format(i_episode))
# Send status to display
print('\r Episode {} \tAverage Score: {:.2f} \tMax Score: {:.2f} \tLast Score: {:.2f}'.format(i_episode, np.mean(scores[-100:]), np.max(scores), scores[-1]), end="")
if i_episode % print_every == 0:
print('\r Episode {} \tAverage Score: {:.2f} \tMax Score: {:.2f} \tLast Score: {:.2f}'.format(i_episode, np.mean(scores[-100:]), np.max(scores), scores[-1]))
if np.mean(scores[-100:]) > 1:
return scores
return scores
# Set up one agent and one replay buffer per player, sized for the state and action spaces
agent = []
memory = []
for i in range(num_agents):
agent.append(Agent(state_size=state_size, action_size=action_size, batch_size=BATCH_SIZE, random_seed=i))
memory.append(ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, i))
scores = maddpg()
# Plot results
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
fig.savefig("images/training.png")
plt.show()
# Close the environment.
env.close()
| 39.923077
| 173
| 0.593021
|