hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e972b70d6970fd1d6b55a67f98bddf5fb7dd6960 | 7,884 | py | Python | tools/driver/p4c_src/main.py | DTharun/p4lang | d83d078f95ae2eaf054955fb645fa4d4a7e54847 | [
"Apache-2.0"
] | 27 | 2018-06-30T13:01:40.000Z | 2022-01-28T09:07:21.000Z | tools/driver/p4c_src/main.py | DTharun/p4lang | d83d078f95ae2eaf054955fb645fa4d4a7e54847 | [
"Apache-2.0"
] | 8 | 2018-06-01T11:19:02.000Z | 2018-06-09T13:09:15.000Z | tools/driver/p4c_src/main.py | DTharun/p4lang | d83d078f95ae2eaf054955fb645fa4d4a7e54847 | [
"Apache-2.0"
] | 6 | 2019-02-24T11:20:22.000Z | 2021-05-25T17:21:07.000Z | # Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""
p4c - P4 Compiler Driver
"""
from __future__ import absolute_import
import argparse
import glob
import os
import sys
import re
import p4c_src.config as config
import p4c_src
# \TODO: let the backends set their versions ...
p4c_version = p4c_src.__version__
def set_version(ver):
    """Replace the version string reported by get_version()."""
    global p4c_version
    p4c_version = ver


def get_version():
    """Return the current p4c driver version string."""
    return p4c_version
def display_supported_targets(cfg):
    """Format the backends registered in `cfg` as a help string.

    Returns a header line followed by one target triplet per line,
    each line terminated by a newline.
    """
    lines = ["Supported targets in \"target-arch-vendor\" triplet:"]
    for target in cfg.target:
        lines.append(str(target))
    return "\n".join(lines) + "\n"
def add_developer_options(parser):
    """Register compiler-debugging flags on `parser`.

    Only invoked when the P4C_BUILD_TYPE environment variable is set to
    "DEVELOPER" (see main()).

    Args:
        parser: the argparse.ArgumentParser to extend in place.
    """
    parser.add_argument("-T", dest="log_levels",
                        action="append", default=[],
                        help="[Compiler debugging] Adjust logging level per file (see below)")
    parser.add_argument("--top4", dest="passes",
                        action="append", default=[],
                        help="[Compiler debugging] Dump the P4 representation after \
                        passes whose name contains one of `passX' substrings. \
                        When '-v' is used this will include the compiler IR.")
    parser.add_argument("--dump", dest="dump_dir", default=None,
                        help="[Compiler debugging] Folder where P4 programs are dumped.")
    parser.add_argument("--toJson", dest="json", default=None,
                        help="Dump IR to JSON in the specified file.")
    parser.add_argument("--pp", dest="pretty_print", default=None,
                        help="Pretty-print the program in the specified file.")
def main():
    """Entry point of the p4c compiler driver.

    Parses the command line, loads every backend configuration file found
    in $P4C_CFG_PATH (so backends can register proprietary options before
    parsing), selects the backend whose pattern matches the requested
    target-arch-vendor triplet, and runs it.
    """
    parser = argparse.ArgumentParser(conflict_handler='resolve')
    parser.add_argument("-V", "--version", dest="show_version",
                        help="show version and exit",
                        action="store_true", default=False)
    parser.add_argument("-v", "--debug", dest="debug",
                        help="verbose",
                        action="store_true", default=False)
    parser.add_argument("-###", "--test-only", dest="dry_run",
                        help="print (but do not run) the commands",
                        action="store_true", default=False)
    parser.add_argument("-Xpreprocessor", dest="preprocessor_options",
                        metavar="<arg>",
                        help="Pass <arg> to the preprocessor",
                        action="append", default=[])
    parser.add_argument("-Xp4c", dest="compiler_options",
                        metavar="<arg>",
                        help="Pass <arg> to the compiler",
                        action="append", default=[])
    parser.add_argument("-Xassembler", dest="assembler_options",
                        metavar="<arg>",
                        help="Pass <arg> to the assembler",
                        action="append", default=[])
    parser.add_argument("-Xlinker", dest="linker_options",
                        metavar="<arg>",
                        help="Pass <arg> to the linker",
                        action="append", default=[])
    parser.add_argument("-b", "--target", dest="backend",
                        help="specify target backend",
                        action="store", default="bmv2-ss-p4org")
    parser.add_argument("-c", dest="run_all",
                        help="Only run preprocess, compile, and assemble steps",
                        action="store_true", default=True)
    parser.add_argument("-D", dest="preprocessor_defines",
                        help="define a macro to be used by the preprocessor",
                        action="append", default=[])
    parser.add_argument("-E", dest="run_preprocessor_only",
                        help="Only run the preprocessor",
                        action="store_true", default=False)
    parser.add_argument("-e", dest="skip_preprocessor",
                        help="Skip the preprocessor",
                        action="store_true", default=False)
    parser.add_argument("-g", dest="debug_info",
                        help="Generate debug information",
                        action="store_true", default=False)
    parser.add_argument("-I", dest="search_path",
                        help="Add directory to include search path",
                        action="append", default=[])
    parser.add_argument("-o", dest="output_directory",
                        help="Write output to the provided path",
                        action="store", metavar="PATH", default=".")
    parser.add_argument("--p4runtime-file",
                        help="Write a P4Runtime control plane API description "
                             "to the specified file.",
                        action="store", default=None)
    parser.add_argument("--p4runtime-format",
                        choices=["binary", "json", "text"],
                        help="Choose output format for the P4Runtime API "
                             "description (default is binary).",
                        action="store", default="binary")
    parser.add_argument("--target-help", dest="show_target_help",
                        help="Display target specific command line options.",
                        action="store_true", default=False)
    parser.add_argument("-S", dest="run_till_assembler",
                        help="Only run the preprocess and compilation steps",
                        action="store_true", default=False)
    parser.add_argument("-x", dest="language",
                        choices=["p4-14", "p4-16"],
                        help="Treat subsequent input files as having type language.",
                        action="store", default="p4-16")
    # Developer-only flags are exposed only in DEVELOPER builds.
    # BUGFIX: use .get() so an unset P4C_BUILD_TYPE no longer raises KeyError.
    if os.environ.get('P4C_BUILD_TYPE') == "DEVELOPER":
        add_developer_options(parser)

    parser.add_argument("source_file", nargs='?', help="Files to compile", default=None)

    # Load supported configurations before parsing options, so that
    # backends can register proprietary options. P4C_CFG_PATH is required;
    # a KeyError here means the driver environment is not set up.
    cfg_files = glob.glob("{}/*.cfg".format(os.environ['P4C_CFG_PATH']))
    cfg = config.Config(config_prefix="p4c")
    for cf in cfg_files:
        cfg.load_from_config(cf, parser)

    # parse the arguments
    opts = parser.parse_args()

    # Deal with early exits.
    # BUGFIX: the original used Python 2 'print' statements, which are a
    # syntax error on Python 3; the single-argument print() form below
    # produces identical output on both interpreters.
    if opts.show_version:
        print("p4c " + get_version())
        sys.exit(0)

    if opts.show_target_help:
        print(display_supported_targets(cfg))
        sys.exit(0)

    if not opts.source_file:
        parser.error('No input specified.')

    # Check that the triplet value is well formed: target-arch-vendor.
    triplet = opts.backend.split('-')
    if len(triplet) != 3:
        parser.error("Invalid target-arch-vendor triplet: {}\n{}".format(
            triplet, display_supported_targets(cfg)))

    # Find the first backend whose glob-style pattern matches the
    # requested triplet ('*' in the pattern matches alphanumeric runs).
    backend = None
    for target in cfg.target:
        regex = target._backend.replace('*', '[a-zA-Z0-9*]*')
        pattern = re.compile(regex)
        if pattern.match(opts.backend):
            backend = target
            break
    if backend is None:  # PEP 8: identity comparison with None
        parser.error("Unknown backend: {}".format(str(opts.backend)))

    # Set all configuration and command line options for the backend,
    # then run all of its commands.
    backend.process_command_line_options(opts)
    backend.run()
| 42.616216 | 94 | 0.57864 |
8ad3ab9f5e46a8c67bba479a1bf16e0eaf194126 | 8,458 | py | Python | tests/core/test_secrets.py | ShepardZhao/rancher | a747ac408ca34fb0bf465276f07557ec43bf9c89 | [
"Apache-2.0"
] | 1 | 2019-04-17T08:00:59.000Z | 2019-04-17T08:00:59.000Z | tests/core/test_secrets.py | ShepardZhao/rancher | a747ac408ca34fb0bf465276f07557ec43bf9c89 | [
"Apache-2.0"
] | 1 | 2021-02-23T19:12:08.000Z | 2021-02-23T19:12:08.000Z | tests/core/test_secrets.py | ShepardZhao/rancher | a747ac408ca34fb0bf465276f07557ec43bf9c89 | [
"Apache-2.0"
] | 2 | 2019-11-14T15:46:01.000Z | 2020-05-06T15:31:37.000Z | from .common import random_str
CERT = """-----BEGIN CERTIFICATE-----
MIIDEDCCAfgCCQC+HwE8rpMN7jANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJV
UzEQMA4GA1UECBMHQXJpem9uYTEVMBMGA1UEChMMUmFuY2hlciBMYWJzMRIwEAYD
VQQDEwlsb2NhbGhvc3QwHhcNMTYwNjMwMDExMzMyWhcNMjYwNjI4MDExMzMyWjBK
MQswCQYDVQQGEwJVUzEQMA4GA1UECBMHQXJpem9uYTEVMBMGA1UEChMMUmFuY2hl
ciBMYWJzMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQC1PR0EiJjM0wbFQmU/yKSb7AuQdzhdW02ya+RQe+31/B+sOTMr
z9b473KCKf8LiFKFOIQUhR5fPvwyrrIWKCEV9pCp/wM474fX32j0zYaH6ezZjL0r
L6hTeGFScGse3dk7ej2+6nNWexpujos0djFi9Gu11iVHIJyT2Sx66kPPPZVRkJO9
5Pfetm5SLIQtJHUwy5iWv5Br+AbdXlUAjTYUqS4mhKIIbblAPbOKrYRxGXX/6oDV
J5OGLle8Uvlb8poxqmy67FPyMObNHhjggKwboXhmNuuT2OGf/VeZANMYubs4JP2V
ZLs3U/1tFMAOaQM+PbT9JuwMSmGYFX0Qiuh/AgMBAAEwDQYJKoZIhvcNAQEFBQAD
ggEBACpkRCQpCn/zmTOwboBckkOFeqMVo9cvSu0Sez6EPED4WUv/6q5tlJeHekQm
6YVcsXeOMkpfZ7qtGmBDwR+ly7D43dCiPKplm0uApO1CkogG5ePv0agvKHEybd36
xu9pt0fnxDdrP2NrP6trHq1D+CzPZooLRfmYqbt1xmIb00GpnyiJIUNuMu7GUM3q
NxWGK3eq+1cyt6xr8nLOC5zaGeSyZikw4+9vqLudNSyYdnw9mdHtrYT0GlcEP1Vc
NK+yrhDCvEWH6+4+pp8Ve2P2Le5tvbA1m24AxyuC9wHS5bUmiNHweLXNpxLFTjK8
BBUi6y1Vm9jrDi/LiiHcN4sJEoU=
-----END CERTIFICATE-----"""
KEY = """-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAtT0dBIiYzNMGxUJlP8ikm+wLkHc4XVtNsmvkUHvt9fwfrDkz
K8/W+O9ygin/C4hShTiEFIUeXz78Mq6yFighFfaQqf8DOO+H199o9M2Gh+ns2Yy9
Ky+oU3hhUnBrHt3ZO3o9vupzVnsabo6LNHYxYvRrtdYlRyCck9kseupDzz2VUZCT
veT33rZuUiyELSR1MMuYlr+Qa/gG3V5VAI02FKkuJoSiCG25QD2ziq2EcRl1/+qA
1SeThi5XvFL5W/KaMapsuuxT8jDmzR4Y4ICsG6F4Zjbrk9jhn/1XmQDTGLm7OCT9
lWS7N1P9bRTADmkDPj20/SbsDEphmBV9EIrofwIDAQABAoIBAGehHxN1i3EqhKeL
9FrJPh4NlPswwCDZUQ7hFDZU9lZ9qBqQxkqZ18CVIXN90eBlPVIBY7xb9Wbem9Pb
AecbYPeu+T7KmqwWgiUUEG5RikfyoMQv7gZghK3dmkBKGWYX0dtpZR7h7bsYPp/S
j5QatNhxC5l4be5CnmUHe6B4jPdUt8kRfTj0ukYGm/h3cOm/tEQeRYIIN/N6JN2Z
JWYzsyqGmlOTp7suczkRIUS0AjiljT1186bQSou62iMtMqEgArusFFb9m/dXCCYo
t/Q1SR4lRodDfzcF/CRbdR/ZC8gZlyCdbI4WHOw9IwwHnmrllx4MXFP/p6p+gEtl
cKMzHXECgYEA27KnkDnz338qKC2cCGkMf3ARfTX6gSlqmvgM9zOa8FLWp6GR6Rvo
NgVLUi63bQqv9D5qYSsweAp1QTvIxJffWMJDTWtxowOXVW5P8WJ8jp/pAXoWGRbd
pnavy6Ih0XT57huwT7fGGIikXYfw/kB85PPJL3FsT/b6G4ay2+Z7OGkCgYEA0y+d
bxUewYZkpNy7+kIh0x4vrJvNqSL9ZwiP2R159zu7zDwDph/fkhXej0FEtbXybt+O
4s9M3l4nNsY6AS9sIPCB5SxWguhx0z76U5cz1qFFZwIHtL8r1jHrl5iwkVyOAtVV
0BokmJG4Pn07yZo/iCmSTEfwcePvCMvOsPtcvKcCgYEAu5+SbKChfhBaz19MLv6P
ttHdjcIogl/9dAU9BWxj+LO2MAjS1HKJ2ICi97d/3LbQ19TqArvgs9OymZhV+Fb/
Xgzhb1+/94icmFASI8KJP0CfvCwobRrTBlO8BDsdiITO4SNyalI28kLXpCzxiiFG
yDzOZx8FcjEpHZLmctgeCWkCgYAO0rDCM0FNZBl8WOH41tt47g16mBT/Yi1XJgqy
upbs+4xa8XtwFZyjrFVKyNIBzxuNHLPyx4olsYYfGhrIKoP0a+0yIMKRva7/nNQF
Of+xePBeIo5X6XMyPZ7DrTv3d/+fw0maqbsX2mKMQE4KAIGlFQXnxMTjuZP1khiX
44zG0QKBgGwQ8T4DGZK5ukLQmhLi9npCaAW99s/uuKArMzAG9xd/I8YntM/kVY0V
VUi3lKqwXhtReYdrqVTPdjnyGIYIGGNRD7EKqQe15IRfbpy536DSN+LvL65Fdyis
iNITDKNP1H3hedFNFfbTGpueYdRX6QaptK4+NB4+dOm7hn8iqq7U
-----END RSA PRIVATE KEY-----"""
def test_secrets(admin_pc):
    """Round-trip an Opaque secret: create, add a key, update, list, delete."""
    api = admin_pc.client
    secret_name = random_str()

    secret = api.create_secret(name=secret_name, stringData={'foo': 'bar'})
    assert secret.type == 'secret'
    assert secret.kind == 'Opaque'
    assert secret.name == secret_name
    assert secret.data.foo == 'YmFy'  # server stores values base64-encoded

    # Attach a second key and push the modified data map back.
    secret.data.baz = 'YmFy'
    secret = api.reload(api.update(secret, data=secret.data))
    assert secret.baseType == 'secret'
    assert secret.type == 'secret'
    assert secret.kind == 'Opaque'
    assert secret.name == secret_name
    assert secret.data.foo == 'YmFy'
    assert secret.data.baz == 'YmFy'
    assert secret.namespaceId is None
    assert 'namespace' not in secret.data
    assert secret.projectId == admin_pc.project.id

    # The secret must show up in the project-level listing.
    assert any(item.id == secret.id for item in api.list_secret())

    api.delete(secret)
def test_certificates(admin_pc):
    """Create a certificate secret and verify its server-parsed fields."""
    api = admin_pc.client
    cert_name = random_str()

    cert = api.create_certificate(name=cert_name, key=KEY, certs=CERT)
    assert cert.baseType == 'secret'
    # Expiry is extracted server-side from the PEM data in CERT.
    assert cert.expiresAt == '2026-06-28T01:13:32Z'
    assert cert.type == 'certificate'
    assert cert.name == cert_name
    assert cert.certs == CERT
    assert cert.namespaceId is None
    assert 'namespace' not in cert

    # NOTE: the certificate update/reload round-trip is intentionally not
    # exercised here (it was disabled in the original test as well).

    assert any(item.id == cert.id for item in api.list_certificate())

    cert = api.by_id_certificate(cert.id)
    assert cert is not None

    api.delete(cert)
def test_docker_credential(admin_pc):
    """Create a registry credential, update it, and check password masking."""
    api = admin_pc.client
    cred_name = random_str()
    registries = {'index.docker.io': {
        'username': 'foo',
        'password': 'bar',
    }}

    cred = api.create_docker_credential(name=cred_name,
                                        registries=registries)
    assert cred.baseType == 'secret'
    assert cred.type == 'dockerCredential'
    assert cred.name == cred_name
    assert cred.registries['index.docker.io'].username == 'foo'
    # The password is still present on the object returned from create...
    assert 'password' in cred.registries['index.docker.io']
    assert cred.namespaceId is None
    assert 'namespace' not in cred
    assert cred.projectId == admin_pc.project.id

    # Add a second registry entry and push the update.
    registries['two'] = {
        'username': 'blah'
    }
    cred = api.reload(api.update(cred, registries=registries))
    assert cred.baseType == 'secret'
    assert cred.type == 'dockerCredential'
    assert cred.name == cred_name
    assert cred.registries['index.docker.io'].username == 'foo'
    assert cred.registries.two.username == 'blah'
    # ...but is masked once the object is read back after an update.
    assert 'password' not in cred.registries['index.docker.io']
    assert cred.namespaceId is None
    assert 'namespace' not in cred
    assert cred.projectId == admin_pc.project.id

    assert any(item.id == cred.id for item in api.list_docker_credential())

    cred = api.by_id_docker_credential(cred.id)
    assert cred is not None

    api.delete(cred)
def test_basic_auth(admin_pc):
    """Create a basicAuth secret, update the username, check password masking."""
    api = admin_pc.client
    auth_name = random_str()

    auth = api.create_basic_auth(name=auth_name,
                                 username='foo',
                                 password='bar')
    assert auth.baseType == 'secret'
    assert auth.type == 'basicAuth'
    assert auth.name == auth_name
    assert auth.username == 'foo'
    assert 'password' in auth
    assert auth.namespaceId is None
    assert 'namespace' not in auth
    assert auth.projectId == admin_pc.project.id

    auth = api.reload(api.update(auth, username='foo2'))
    assert auth.baseType == 'secret'
    assert auth.type == 'basicAuth'
    assert auth.name == auth_name
    assert auth.username == 'foo2'
    # The password is not echoed back after an update.
    assert 'password' not in auth
    assert auth.namespaceId is None
    assert 'namespace' not in auth
    assert auth.projectId == admin_pc.project.id

    assert any(item.id == auth.id for item in api.list_basic_auth())

    auth = api.by_id_basic_auth(auth.id)
    assert auth is not None

    api.delete(auth)
def test_ssh_auth(admin_pc):
    """Create an sshAuth secret, update it, and check key masking."""
    api = admin_pc.client
    auth_name = random_str()

    auth = api.create_ssh_auth(name=auth_name,
                               privateKey='foo')
    assert auth.baseType == 'secret'
    assert auth.type == 'sshAuth'
    assert auth.name == auth_name
    assert 'privateKey' in auth
    assert auth.namespaceId is None
    assert 'namespace' not in auth
    assert auth.projectId == admin_pc.project.id

    auth = api.reload(api.update(auth, privateKey='foo2'))
    assert auth.baseType == 'secret'
    assert auth.type == 'sshAuth'
    assert auth.name == auth_name
    # The private key is not echoed back after an update.
    assert 'privateKey' not in auth
    assert auth.namespaceId is None
    assert 'namespace' not in auth
    assert auth.projectId == admin_pc.project.id

    assert any(item.id == auth.id for item in api.list_ssh_auth())

    auth = api.by_id_ssh_auth(auth.id)
    assert auth is not None

    api.delete(auth)
| 33.039063 | 68 | 0.731497 |
8f037fb6cbc1907357d41dbef7cebde6c3f2bba5 | 594 | py | Python | modoboa/admin/migrations/0011_domain_transport.py | Makakken-Crew/modoboa | faac20fe36e26bd7c01edc2adb12312dda4cfaa2 | [
"ISC"
] | 2 | 2021-04-20T19:40:09.000Z | 2021-04-20T20:23:57.000Z | modoboa/admin/migrations/0011_domain_transport.py | vavilon/modoboa | 9fcd133bc883a94cbf66c5bc9687787caadc8ca2 | [
"0BSD"
] | 19 | 2021-05-05T03:31:52.000Z | 2021-12-11T09:58:52.000Z | modoboa/admin/migrations/0011_domain_transport.py | vavilon/modoboa | 9fcd133bc883a94cbf66c5bc9687787caadc8ca2 | [
"0BSD"
] | 1 | 2020-01-10T11:43:00.000Z | 2020-01-10T11:43:00.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-12-20 14:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional one-to-one link
    # from admin.Domain to transport.Transport.

    dependencies = [
        ('transport', '0001_initial'),
        ('admin', '0010_auto_20170215_1328'),
    ]

    operations = [
        migrations.AddField(
            model_name='domain',
            name='transport',
            # NULLable so existing rows need no value; SET_NULL detaches the
            # domain (rather than deleting it) when its transport is removed.
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='transport.Transport'),
        ),
    ]
| 25.826087 | 122 | 0.648148 |
61d077aa9699b010c0112c50d2bb5210c65b0e8b | 556 | py | Python | firehose/functions/firehose/main.py | pei0804/serverless-sample | 6ac55469aed7cf0d6e9f57109f0852122a951906 | [
"MIT"
] | null | null | null | firehose/functions/firehose/main.py | pei0804/serverless-sample | 6ac55469aed7cf0d6e9f57109f0852122a951906 | [
"MIT"
] | null | null | null | firehose/functions/firehose/main.py | pei0804/serverless-sample | 6ac55469aed7cf0d6e9f57109f0852122a951906 | [
"MIT"
] | null | null | null | import boto3
def lambda_handler(event, context):
    """AWS Lambda entry point: push three sample JSON events to the
    'firehose' Kinesis Firehose delivery stream.

    Args:
        event: standard Lambda invocation payload (unused).
        context: standard Lambda runtime context (unused).
    """
    firehose = boto3.client('firehose')
    # The original code repeated the same put_record call three times with
    # only the payload differing; loop over the payloads instead (DRY).
    # Payloads are byte-identical to the originals: newline-delimited JSON
    # so the delivery stream output stays line-oriented.
    payloads = (
        b'{"user_id": "a", "event": "AAA"}\n',
        b'{"user_id": "b", "event": "BBB"}\n',
        b'{"user_id": "c", "event": "CCC"}\n',
    )
    for data in payloads:
        firehose.put_record(
            DeliveryStreamName='firehose',
            Record={
                'Data': data
            }
        )
| 23.166667 | 57 | 0.508993 |
6b0be06b15cb9e6ce1469770bcfd2a0046cd5520 | 404 | py | Python | core/migrations/0009_auto_20220115_2012.py | timehaswings/aoi_backend | 17fa315d250022455e38235a10656ddaae4db655 | [
"Apache-2.0"
] | 1 | 2021-12-22T01:48:02.000Z | 2021-12-22T01:48:02.000Z | core/migrations/0009_auto_20220115_2012.py | timehaswings/aoi_backend | 17fa315d250022455e38235a10656ddaae4db655 | [
"Apache-2.0"
] | null | null | null | core/migrations/0009_auto_20220115_2012.py | timehaswings/aoi_backend | 17fa315d250022455e38235a10656ddaae4db655 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.7 on 2022-01-15 20:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0008_groupmenu'),
]
operations = [
migrations.AlterField(
model_name='menu',
name='url',
field=models.CharField(blank=True, max_length=160, verbose_name='菜单URL'),
),
]
| 21.263158 | 85 | 0.596535 |
fd21a1d60885c7ee0db39f822ef0fb571f20567b | 24,169 | py | Python | code/run_fewrel.py | CJWBW/my_erine | 9cedbcb2efcdfe4e7cb0aabaff3a53e971131ebb | [
"MIT"
] | null | null | null | code/run_fewrel.py | CJWBW/my_erine | 9cedbcb2efcdfe4e7cb0aabaff3a53e971131ebb | [
"MIT"
] | null | null | null | code/run_fewrel.py | CJWBW/my_erine | 9cedbcb2efcdfe4e7cb0aabaff3a53e971131ebb | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import simplejson as json
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from knowledge_bert.tokenization import BertTokenizer
from knowledge_bert.modeling import BertForSequenceClassification
from knowledge_bert.optimization import BertAdam
from knowledge_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """One raw training/dev instance for sequence classification.

    Attributes:
        guid: Unique identifier of the example.
        text_a: Untokenized text of the first sequence; must always be
            provided (for FewRel it is a (sentence, entity-annotations) pair).
        text_b: Optional untokenized text of the second sequence; only
            needed for sequence-pair tasks.
        label: Optional gold label; set for train/dev examples, absent
            for test examples.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the raw fields unchanged; no processing happens here."""
        self.label = label
        self.text_b = text_b
        self.text_a = text_a
        self.guid = guid
class InputFeatures(object):
    """Numeric features for one example, ready to be batched into tensors.

    Holds the padded token ids, attention mask, segment ids, the aligned
    entity-id sequence with its own mask, and the integer label id.
    """

    def __init__(self, input_ids, input_mask, segment_ids, input_ent, ent_mask, label_id):
        """Keep each (already padded) feature sequence as given."""
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.input_ent = input_ent
        self.ent_mask = ent_mask
        self.label_id = label_id
class DataProcessor(object):
    """Abstract base class for sequence-classification dataset readers."""

    def get_train_examples(self, data_dir):
        """Return the `InputExample`s of the training split."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Return the `InputExample`s of the dev split."""
        raise NotImplementedError()

    def get_labels(self):
        """Return the list of labels for this dataset."""
        raise NotImplementedError()

    @classmethod
    def _read_json(cls, input_file):
        """Parse and return the JSON document stored at `input_file`."""
        with open(input_file, "r", encoding='utf-8') as handle:
            return json.loads(handle.read())
class FewrelProcessor(DataProcessor):
    """Reader for the FewRel relation-classification dataset."""

    def get_train_examples(self, data_dir):
        """Load train.json and return (examples, list of distinct labels)."""
        examples = self._create_examples(
            self._read_json(os.path.join(data_dir, "train.json")), "train")
        labels = set(example.label for example in examples)
        return examples, list(labels)

    def get_dev_examples(self, data_dir):
        """Load dev.json and return its examples."""
        return self._create_examples(
            self._read_json(os.path.join(data_dir, "dev.json")), "dev")

    def get_labels(self):
        """Unused placeholder; FewRel labels come from the data itself."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn raw JSON records into InputExample objects.

        Entity annotations whose second field is 1 are remapped to 0
        (mutated in place, matching the original behaviour).
        """
        examples = []
        for index, record in enumerate(lines):
            guid = "{0}-{1}".format(set_type, index)
            for ent in record['ents']:
                if ent[1] == 1:
                    ent[1] = 0
            examples.append(
                InputExample(guid=guid,
                             text_a=(record['text'], record['ents']),
                             text_b=None,
                             label=record['label']))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, threshold):
    """Loads a data file into a list of `InputBatch`s.

    For each example the head/tail entity mentions are wrapped with "#"/"$"
    markers, the text is tokenized together with its entity annotations,
    tokens are mapped to vocabulary ids and entities to rows of the
    pretrained embedding table (kg_embed/entity2id.txt), and everything is
    padded to `max_seq_length`.
    """
    label_list = sorted(label_list)
    label_map = {label: i for i, label in enumerate(label_list)}

    # Wikidata QID -> row index into the pretrained entity embedding table.
    entity2id = {}
    with open("kg_embed/entity2id.txt") as fin:
        fin.readline()  # first line is the entity count; skip it
        for line in fin:
            qid, eid = line.strip().split('\t')
            entity2id[qid] = int(eid)

    features = []
    for (ex_index, example) in enumerate(examples):
        ex_text_a = example.text_a[0]
        h, t = example.text_a[1]  # head/tail annotations: [id, start, end]
        h_name = ex_text_a[h[1]:h[2]]
        t_name = ex_text_a[t[1]:t[2]]
        # Add [HD] and [TL], which are "#" and "$" respectively.
        if h[1] < t[1]:
            ex_text_a = ex_text_a[:h[1]] + "# " + h_name + " #" + ex_text_a[
                h[2]:t[1]] + "$ " + t_name + " $" + ex_text_a[t[2]:]
        else:
            ex_text_a = ex_text_a[:t[1]] + "$ " + t_name + " $" + ex_text_a[
                t[2]:h[1]] + "# " + h_name + " #" + ex_text_a[h[2]:]
        # Shift the character offsets past the inserted marker text: the
        # first mention gains "# " (2 chars) before it, the second gains
        # "# ... # $ " (6 chars) before it.
        if h[1] < t[1]:
            h[1] += 2
            h[2] += 2
            t[1] += 6
            t[2] += 6
        else:
            h[1] += 6
            h[2] += 6
            t[1] += 2
            t[2] += 2
        tokens_a, entities_a = tokenizer.tokenize(ex_text_a, [h, t])
        # Sanity check: exactly two annotated entities must survive
        # tokenization. NOTE(review): the tested condition uses `x` but the
        # printed count uses `x[0]` -- the diagnostic may not match the test.
        if len([x for x in entities_a if x != "UNK"]) != 2:
            print(entities_a, len([x for x in entities_a if x[0] != "UNK"]))
            exit(1)

        tokens_b = None
        if example.text_b:
            tokens_b, entities_b = tokenizer.tokenize(example.text_b[0],
                                                      [x for x in example.text_b[1] if x[-1] > threshold])
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, entities_a, entities_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
                entities_a = entities_a[:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        ents = ["UNK"] + entities_a + ["UNK"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            ents += entities_b + ["UNK"]
            segment_ids += [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Map each aligned entity to its embedding row; -1 means "no entity".
        input_ent = []
        ent_mask = []
        for ent in ents:
            if ent != "UNK" and ent in entity2id:
                input_ent.append(entity2id[ent])
                ent_mask.append(1)
            else:
                input_ent.append(-1)
                ent_mask.append(0)
        # Position 0 ([CLS]) is always marked valid in the entity mask even
        # though its entity id stays -1.
        ent_mask[0] = 1

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length (entity ids pad with -1 to match
        # the "no entity" value used above).
        padding = [0] * (max_seq_length - len(input_ids))
        padding_ = [-1] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        input_ent += padding_
        ent_mask += padding

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(input_ent) == max_seq_length
        assert len(ent_mask) == max_seq_length

        label_id = label_map[example.label]
        # Log the first few converted examples for manual inspection.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                [str(x) for x in tokens]))
            logger.info("ents: %s" % " ".join(
                [str(x) for x in ents]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          input_ent=input_ent,
                          ent_mask=ent_mask,
                          label_id=label_id))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, ents_a, ents_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
ents_a.pop()
else:
tokens_b.pop()
ents_b.pop()
def accuracy(out, labels):
    """Count how many rows of `out` have their argmax equal to `labels`."""
    predicted = np.argmax(out, axis=1)
    return (predicted == labels).sum()
def warmup_linear(x, warmup=0.002):
    """Linear warmup schedule: ramp from 0 to 1 over `warmup`, then hold at 1."""
    return x / warmup if x < warmup else 1.0
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--ernie_model", default=None, type=str, required=True,
help="Ernie pre-trained model")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--threshold', type=float, default=.3)
args = parser.parse_args()
processors = FewrelProcessor
num_labels_task = 80
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
processor = processors()
num_labels = num_labels_task
label_list = None
tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_steps = None
train_examples, label_list = processor.get_train_examples(args.data_dir)
num_train_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
model, _ = BertForSequenceClassification.from_pretrained(args.ernie_model,
cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(
args.local_rank),
num_labels=num_labels)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent']
param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = num_train_steps
if args.local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=t_total)
global_step = 0
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, args.threshold)
vecs = []
vecs.append([0] * 100)
with open("kg_embed/entity2vec.vec", 'r') as fin:
for line in fin:
vec = line.strip().split('\t')
vec = [float(x) for x in vec]
vecs.append(vec)
embed = torch.FloatTensor(vecs)
embed = torch.nn.Embedding.from_pretrained(embed)
# embed = torch.nn.Embedding(5041175, 100)
logger.info("Shape of entity embedding: " + str(embed.weight.size()))
del vecs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long)
all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks,
all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
output_loss_file = os.path.join(args.output_dir, "loss")
loss_fout = open(output_loss_file, 'w')
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch))
input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch
input_ent = embed(input_ent + 1).to(device) # -1 -> 0
loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
loss_fout.write("{}\n".format(loss.item()))
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# modify learning rate with special warm up BERT uses
lr_this_step = args.learning_rate * warmup_linear(global_step / t_total, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
torch.save(model_to_save.state_dict(), output_model_file)
# Save a trained model
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if __name__ == "__main__":
main()
| 42.776991 | 127 | 0.582771 |
18a22f9ecd12b8cd2ba070dcb05f2e55ef3f8d64 | 86 | py | Python | mne/datasets/kiloword/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 1,953 | 2015-01-17T20:33:46.000Z | 2022-03-30T04:36:34.000Z | mne/datasets/kiloword/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 8,490 | 2015-01-01T13:04:18.000Z | 2022-03-31T23:02:08.000Z | mne/datasets/kiloword/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 1,130 | 2015-01-08T22:39:27.000Z | 2022-03-30T21:44:26.000Z | """MNE visual_92_categories dataset."""
from .kiloword import data_path, get_version
| 21.5 | 44 | 0.790698 |
9fe809b289dcd40d88c83e33a194fe713490761a | 6,367 | py | Python | redis_wikicache.py | radomd92/botjagwar | 1dc96600c40041057a9f9afde38c31ca34b8db38 | [
"MIT"
] | 7 | 2015-01-23T17:24:04.000Z | 2022-01-12T16:54:24.000Z | redis_wikicache.py | radomd92/botjagwar | 1dc96600c40041057a9f9afde38c31ca34b8db38 | [
"MIT"
] | 18 | 2017-12-09T01:11:23.000Z | 2021-09-22T13:26:24.000Z | redis_wikicache.py | radomd92/botjagwar | 1dc96600c40041057a9f9afde38c31ca34b8db38 | [
"MIT"
] | 1 | 2015-06-22T02:17:55.000Z | 2015-06-22T02:17:55.000Z | import pywikibot
import redis
from api.config import BotjagwarConfig
from api.decorator import separate_process
from import_wiktionary import EnWiktionaryDumpImporter
config = BotjagwarConfig()
class NoPage(Exception):
    """Raised when a wiki page is not cached in Redis and cannot (or may
    not, in offline mode) be fetched from the live wiki."""
    pass
class RedisSite(object):
    """Wiki site whose page wikitext is cached in a Redis instance.

    Cache keys have the form ``"<wiki>.<language>/<page title>"``
    (e.g. ``"wiktionary.en/word"``).
    """

    def __init__(
            self,
            language: str,
            wiki: str,
            host='default',
            port=6379,
            password='default'):
        """
        :param language: language code of the wiki (e.g. 'en')
        :param wiki: wiki family name (e.g. 'wiktionary')
        :param host: Redis host, or 'default' to read it from the bot config
        :param port: Redis port
        :param password: Redis password, or 'default' to read it from the
            bot config (an empty configured password is treated as none)
        """
        self.language = language
        self.wiki = wiki
        if host == 'default':
            self.host = config.get('host', 'redis')
        else:
            self.host = host
        if password == 'default':
            self.password = config.get('password', 'redis')
            if not self.password:
                self.password = None
        else:
            # Bug fix: an explicitly supplied password used to be discarded
            # (unconditionally reset to None); honour it instead.
            self.password = password
        self.port = port
        self.instance = redis.Redis(
            self.host,
            self.port,
            password=self.password,
            socket_timeout=3)

    def random_page(self):
        """Return a RedisPage for a random cached page of this site."""
        prefix = bytes(f'{self.wiki}.{self.language}/', 'utf8')
        # randomkey() samples the whole keyspace; retry until we hit a key
        # belonging to this site.  NOTE(review): assumes the database holds
        # at least one key for this site, otherwise this loops forever (and
        # randomkey() may return None on an empty database).
        rkey = self.instance.randomkey()
        while not rkey.startswith(prefix):
            rkey = self.instance.randomkey()
        page_name = str(
            rkey, encoding='utf8').replace(
            f'{self.wiki}.{self.language}/', '')
        return RedisPage(self, page_name)

    @separate_process
    def load_xml_dump(self, dump='user_data/dumps/enwikt.xml'):
        """Import every page of an XML dump into the Redis cache.

        Runs in a separate process (see the ``separate_process`` decorator);
        per-page errors are reported but do not abort the import.
        """
        importer = EnWiktionaryDumpImporter(dump)
        for xml_page in importer.load():
            try:
                title, content = importer.get_page_from_xml(xml_page)
                self.push_page(title, content)
            except redis.ConnectionError as error:
                print(error)
            except Exception as error:
                print('Unknown error ', error)

    def push_page(self, title: str, content: str):
        """Store a page's wikitext under this site's key prefix.

        No-op when either the title or the content is None.
        """
        if title is not None and content is not None:
            self.instance.set(f'{self.wiki}.{self.language}/{title}', content)

    def __str__(self):
        return f'{self.wiki}.{self.language}'
class RedisPage(object):
    """Wiki page whose wikitext is read from a RedisSite cache, with an
    optional fall-back to the live wiki (pywikibot) when ``offline`` is
    False.  Mirrors part of the pywikibot.Page interface; anything not
    implemented here is delegated to a real pywikibot.Page (see
    ``__getattr__``)."""
    def __init__(self, site: RedisSite, title: str, offline=True):
        self.offline = offline  # True: never contact the live wiki
        self.site = site
        self._title = title
    def title(self, *args):
        # *args accepted (and ignored) for pywikibot.Page.title() compatibility
        return self._title
    def __repr__(self):
        return f'Page({self.site}/{self.title()})'
    def isEmpty(self):
        # NOTE(review): propagates NoPage when the page cannot be fetched.
        return self.get() == ''
    def get(self):
        """Return the page wikitext from the cache.

        When the page is not cached and offline mode is off, fetch it
        on-wiki and cache it for next time.  Raises NoPage when the text
        cannot be obtained.  A None title yields the empty string.
        """
        if self._title is None:
            return ''
        cache_contents = self.site.instance.get(
            f'{self.site.wiki}.{self.site.language}/{self._title}')
        if not cache_contents:
            if not self.offline:
                wikisite = pywikibot.Site(self.site.language, self.site.wiki)
                wikipage = pywikibot.Page(wikisite, self._title)
                if wikipage.exists():
                    content = wikipage.get()
                    # Cache the freshly fetched text for later reads
                    self.site.push_page(self._title, content)
                    return content
                else:
                    raise NoPage(
                        f'Page {self._title} at {self.site} not found '
                        f'neither in-redis nor on-wiki')
            else:
                # NOTE(review): the message says "Offline mode is OFF" but
                # this branch runs when offline mode is ON -- wording looks
                # inverted (left unchanged here; runtime string).
                raise NoPage(
                    f'Page {self._title} at {self.site} not found in redis. '
                    f'Offline mode is OFF so no on-wiki fetching.')
        else:
            # Redis returns bytes; decode to str
            cache_contents = str(cache_contents, encoding='utf8')
        return cache_contents
    def exists(self):
        """True when the page is cached (or, when online, exists on-wiki)."""
        cache_contents = self.site.instance.get(
            f'{self.site.wiki}.{self.site.language}/{self._title}')
        if not cache_contents:
            if self.offline:
                return False
            else:
                wikisite = pywikibot.Site(self.site.language, self.site.wiki)
                wikipage = pywikibot.Page(wikisite, self._title)
                return wikipage.exists()
        else:
            return True
    def namespace(self):
        """Offline: return a stub object whose ``content`` attribute holds
        the page text.  Online: delegate to pywikibot.Page.namespace()."""
        if self.offline:
            class Namespace(object):
                # NOTE(review): evaluated at class-creation time, so the
                # page text is fetched immediately (and NoPage may raise).
                content = self.get()
            return Namespace()
        else:
            wikisite = pywikibot.Site(self.site.language, self.site.wiki)
            wikipage = pywikibot.Page(wikisite, self._title)
            return getattr(wikipage, 'namespace')()
    def isRedirectPage(self):
        """True when the (cached or on-wiki) page is a redirect."""
        if self.exists():
            return '#REDIRECT [[' in self.get()
        else:
            if not self.offline:
                wikisite = pywikibot.Site(self.site.language, self.site.wiki)
                wikipage = pywikibot.Page(wikisite, self._title)
                return wikipage.isRedirectPage()
            return False
    def __getattr__(self, item):
        # Fallback for attributes RedisPage does not define: delegate to a
        # real pywikibot.Page (requires network access).  Only called when
        # normal attribute lookup has already failed.
        if hasattr(RedisPage, item):
            return getattr(self, item)
        else:
            wikisite = pywikibot.Site(self.site.language, self.site.wiki)
            wikipage = pywikibot.Page(wikisite, self._title)
            return getattr(wikipage, item)
# Manual entry point: bulk-load a previously downloaded en.wiktionary XML
# dump into the Redis cache.  The commented-out calls below load additional
# dump chunks (each runs in its own process via @separate_process).
if __name__ == '__main__':
    print("""
    Download the en.wiktionary page dumps,
    split it into several chunks (e.g. using split) and run this script.
    All en.wiktionary pages will have their latest version uploaded in your Redis.
    Using RedisSite and RedisPage, you'll have a much faster read and offline access.
    """)
    site = RedisSite('en', 'wiktionary')
    site.load_xml_dump('user_data/dumps/enwikt.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_2.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_3.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_4.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_5.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_6.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_7.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_8.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_9.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_10.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_11.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_13.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_14.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_15.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_16.xml')
    # site.load_xml_dump('user_data/dumps/enwikt_17.xml')
b2329006106df9a6fd2a3a4aef8a9cca3bde2071 | 59 | py | Python | masonite/__init__.py | josephmancuso/masonite-azure-driver | 06e766f9833b48d28a6ffd4afb24114be1a60a1c | [
"MIT"
] | 1 | 2018-12-08T07:07:37.000Z | 2018-12-08T07:07:37.000Z | masonite/__init__.py | vaibhavmule/masonite-cloudinary-driver | 866b073717144b8e4755495a01cd4da20d295eaf | [
"MIT"
] | null | null | null | masonite/__init__.py | vaibhavmule/masonite-cloudinary-driver | 866b073717144b8e4755495a01cd4da20d295eaf | [
"MIT"
] | 1 | 2019-08-07T16:53:09.000Z | 2019-08-07T16:53:09.000Z | from .contrib import drivers
from .contrib import providers | 29.5 | 30 | 0.847458 |
af0e056d6f71bb1927c037ea383c3b625321d406 | 2,180 | py | Python | topaz/modules/ffi/function.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | 6 | 2015-04-10T20:11:03.000Z | 2021-11-10T07:03:46.000Z | topaz/modules/ffi/function.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | 1 | 2017-01-24T10:17:55.000Z | 2017-01-24T10:17:55.000Z | topaz/modules/ffi/function.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | 2 | 2017-01-23T18:47:35.000Z | 2019-11-13T08:31:18.000Z | import sys
from topaz.module import ClassDef
from topaz.modules.ffi import type as ffitype
from topaz.modules.ffi.pointer import W_PointerObject
from topaz.modules.ffi.dynamic_library import coerce_dl_symbol
from topaz.modules.ffi.function_type import W_FunctionTypeObject
from topaz.objects.moduleobject import W_FunctionObject
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import jit
from rpython.rlib.jit_libffi import CIF_DESCRIPTION
from rpython.rlib.jit_libffi import FFI_TYPE_PP
# Export each FFI type name as a module-level constant whose value is its
# index in ffitype.type_names (e.g. INT32 = <index of 'INT32'>).
for i, name in enumerate(ffitype.type_names):
    globals()[name] = i
class W_FFIFunctionObject(W_PointerObject):
    """Ruby-level FFI::Function: a typed wrapper around a raw C function
    pointer, callable from Ruby and attachable to a library module."""
    classdef = ClassDef('FFI::Function', W_PointerObject.classdef)
    # RPython JIT hint: the function pointer never changes once set up
    _immutable_fields_ = ['ptr']
    @classdef.singleton_method('allocate')
    def singleton_method_allocate(self, space, args_w):
        # Ruby `FFI::Function.allocate`
        return W_FFIFunctionObject(space)
    def __init__(self, space):
        W_PointerObject.__init__(self, space)
        # NULL until #initialize / setup resolves a symbol
        self.ptr = lltype.nullptr(rffi.VOIDP.TO)
    @classdef.method('initialize')
    def method_initialize(self, space, w_ret_type, w_arg_types,
                          w_handle=None, w_options=None):
        # Build the FFI::FunctionType describing the signature, then bind
        # the (optional) symbol handle.
        self.w_info = space.send(space.getclassfor(W_FunctionTypeObject),
                                 'new', [w_ret_type, w_arg_types, w_options])
        self.setup(space, w_handle)
    def setup(self, space, w_handle):
        # Resolve w_handle to a C symbol address; NULL when no handle given
        self.ptr = (coerce_dl_symbol(space, w_handle) if w_handle
                    else lltype.nullptr(rffi.VOIDP.TO))
    @classdef.method('call')
    def method_call(self, space, args_w, block=None):
        # Delegate the actual libffi invocation to the FunctionType info
        return self.w_info.invoke(space, self.ptr, args_w, block)
    @classdef.method('attach', name='str')
    def method_attach(self, space, w_lib, name):
        # Expose this function as method `name` on module/library w_lib
        w_lib.attach_method(space, name, W_MethodAdapter(name, self))
class W_MethodAdapter(W_FunctionObject):
    """Adapter that lets an FFI::Function act as an ordinary Ruby method
    (created by FFI::Function#attach)."""
    # RPython JIT hint: both fields are write-once
    _immutable_fields_ = ['name', 'w_ffi_func']
    def __init__(self, name, w_ffi_func):
        W_FunctionObject.__init__(self, name)
        self.name = name
        self.w_ffi_func = w_ffi_func
    def call(self, space, w_receiver, args_w, block):
        # Receiver is ignored: the FFI call depends only on the arguments
        return space.send(self.w_ffi_func, 'call', args_w, block)
| 36.949153 | 77 | 0.713761 |
d75b4cbc06c6d60918f9ffe5918cd5ca78a252c1 | 1,286 | py | Python | tests/utils/model_space_test.py | HSE-LAMBDA/modelgym | 7f8086c716a014852c1c91c6871bf75da6463ceb | [
"Apache-2.0"
] | 18 | 2017-06-11T01:04:19.000Z | 2022-03-09T04:45:19.000Z | tests/utils/model_space_test.py | HSE-LaMBDA/modelgym | 7f8086c716a014852c1c91c6871bf75da6463ceb | [
"Apache-2.0"
] | 37 | 2017-06-12T22:33:12.000Z | 2019-03-07T08:20:43.000Z | tests/utils/model_space_test.py | HSE-LaMBDA/modelgym | 7f8086c716a014852c1c91c6871bf75da6463ceb | [
"Apache-2.0"
] | 20 | 2017-07-18T17:19:02.000Z | 2019-04-09T16:22:53.000Z | import pytest
from modelgym.utils import ModelSpace, process_model_spaces
from modelgym.models import LGBMClassifier, RFClassifier
def test_process_model_spaces():
    """process_model_spaces wraps raw model classes in ModelSpace and keys
    every entry by its ModelSpace name."""
    rf_space = ModelSpace(RFClassifier, name="RF")
    processed = process_model_spaces([LGBMClassifier, rf_space])
    assert len(processed) == 2
    assert all(isinstance(space, ModelSpace) for space in processed.values())
    assert all(name == space.name for name, space in processed.items())
    assert processed["LGBMClassifier"].model_class == LGBMClassifier
    assert processed["RF"] == rf_space
def test_process_model_spaces_unwrap():
    """A single bare model class (not wrapped in a list) is accepted and
    turned into a one-entry name->ModelSpace dict."""
    processed = process_model_spaces(LGBMClassifier)
    assert isinstance(processed, dict)
    assert len(processed) == 1
    assert all(isinstance(space, ModelSpace) for space in processed.values())
    assert all(name == space.name for name, space in processed.items())
    assert processed["LGBMClassifier"].model_class == LGBMClassifier
def test_process_model_space_value_errors():
    """Model *instances* (rather than classes/ModelSpaces) and duplicate
    model names must be rejected with ValueError."""
    with pytest.raises(ValueError):
        process_model_spaces([RFClassifier()])
    with pytest.raises(ValueError):
        process_model_spaces(LGBMClassifier())
    # Two entries resolving to the same name ("LGBMClassifier") collide
    with pytest.raises(ValueError):
        process_model_spaces([LGBMClassifier, LGBMClassifier])
7a52e70c3728755daa51b7cbe7ac413e27f720e3 | 4,988 | py | Python | ooobuild/lo/sdbc/row_set.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/sdbc/row_set.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/sdbc/row_set.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.sdbc
import typing
from abc import abstractproperty
from .result_set import ResultSet as ResultSet_8ecf0a4f
from .x_parameters import XParameters as XParameters_a36c0b10
from .x_row_set import XRowSet as XRowSet_7a090960
if typing.TYPE_CHECKING:
from ..container.x_name_access import XNameAccess as XNameAccess_e2ab0cf6
# NOTE: Cheetah-generated UNO service stub (see file header) -- regenerate
# rather than hand-editing.  The abstract properties below are implemented
# by the UNO runtime object this stub describes.
class RowSet(ResultSet_8ecf0a4f, XParameters_a36c0b10, XRowSet_7a090960):
    """
    Service Class

    is a client side ResultSet, which combines the characteristics of a com.sun.star.sdbc.Statement and a com.sun.star.sdbc.ResultSet.

    It acts like a typical bean. Before you use the RowSet, you have to specify a set of properties like a DataSource and a Command and other properties known of Statement.

    Afterwards, you can populate the RowSet by its execute method to fill the set with data.

    On the one hand, a RowSet can be used as a short cut to retrieve the data of a DataSource. You don't have to establish a connection, create a Statement, and then create a ResultSet. On the other hand, a row set can be used to implement capabilities for a result set, which are not supported by a driver result set, like caching strategies or update capabilities.

    See Also:
        `API RowSet <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1sdbc_1_1RowSet.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.sdbc'
    __ooo_full_ns__: str = 'com.sun.star.sdbc.RowSet'
    __ooo_type_name__: str = 'service'

    @abstractproperty
    def Command(self) -> str:
        """
        is the command which should be executed.
        """

    @abstractproperty
    def DataSourceName(self) -> str:
        """
        is the name of a named datasource to use.
        """

    @abstractproperty
    def EscapeProcessing(self) -> bool:
        """
        returns if escape processing is on or off.

        If escape scanning is on (the default), the driver will do escape substitution before sending the SQL to the database. This is only evaluated, if the CommandType is COMMAND.
        """

    @abstractproperty
    def MaxFieldSize(self) -> int:
        """
        returns the maximum number of bytes allowed for any column value.

        This limit is the maximum number of bytes that can be returned for any column value. The limit applies only to com.sun.star.sdbc.DataType.BINARY , com.sun.star.sdbc.DataType.VARBINARY , com.sun.star.sdbc.DataType.LONGVARBINARY , com.sun.star.sdbc.DataType.CHAR , com.sun.star.sdbc.DataType.VARCHAR , and com.sun.star.sdbc.DataType.LONGVARCHAR columns. If the limit is exceeded, the excess data is silently discarded.

        There is no limitation, if set to zero.
        """

    @abstractproperty
    def MaxRows(self) -> int:
        """
        retrieves the maximum number of rows that a ResultSet can contain.

        If the limit is exceeded, the excess rows are silently dropped.

        There is no limitation, if set to zero.
        """

    @abstractproperty
    def Password(self) -> str:
        """
        determines the password of the user for whom to open the connection.
        """

    @abstractproperty
    def QueryTimeOut(self) -> int:
        """
        retrieves the number of seconds the driver will wait for a Statement to execute.

        If the limit is exceeded, a com.sun.star.sdbc.SQLException is thrown. There is no limitation, if set to zero.
        """

    @abstractproperty
    def ResultSetType(self) -> int:
        """
        determine the result set type.
        """

    @abstractproperty
    def TransactionIsolation(self) -> int:
        """
        indicates the transaction isolation level, which should be used for the connection.
        """

    @abstractproperty
    def TypeMap(self) -> 'XNameAccess_e2ab0cf6':
        """
        is the type map that will be used for the custom mapping of SQL structured types and distinct types.
        """

    @abstractproperty
    def URL(self) -> str:
        """
        is the connection URL.

        Could be used instead of the DataSourceName.
        """

    @abstractproperty
    def User(self) -> str:
        """
        determines the user for whom to open the connection.
        """
__all__ = ['RowSet']
| 36.948148 | 424 | 0.685245 |
f322de074b3ea4b3fd5d3377bcfba25f5a9920b3 | 5,040 | py | Python | talon_one/models/inline_response2006.py | talon-one/talon_one.py | f863bb3c2cc5ddc94d9227adcf14947b2ea7db41 | [
"MIT"
] | 1 | 2021-03-05T06:41:26.000Z | 2021-03-05T06:41:26.000Z | talon_one/models/inline_response2006.py | talon-one/talon_one.py | f863bb3c2cc5ddc94d9227adcf14947b2ea7db41 | [
"MIT"
] | 1 | 2021-09-07T08:56:58.000Z | 2021-09-07T08:56:58.000Z | talon_one/models/inline_response2006.py | talon-one/talon_one.py | f863bb3c2cc5ddc94d9227adcf14947b2ea7db41 | [
"MIT"
] | 1 | 2019-05-21T10:27:54.000Z | 2019-05-21T10:27:54.000Z | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
# NOTE: OpenAPI-generated model -- regenerate via openapi-generator rather
# than hand-editing this class.
class InlineResponse2006(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'total_result_size': 'int',
        'data': 'list[Referral]'
    }

    attribute_map = {
        'total_result_size': 'totalResultSize',
        'data': 'data'
    }

    def __init__(self, total_result_size=None, data=None, local_vars_configuration=None):  # noqa: E501
        """InlineResponse2006 - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._total_result_size = None
        self._data = None
        self.discriminator = None

        # Property setters below run client-side validation
        self.total_result_size = total_result_size
        self.data = data

    @property
    def total_result_size(self):
        """Gets the total_result_size of this InlineResponse2006.  # noqa: E501


        :return: The total_result_size of this InlineResponse2006.  # noqa: E501
        :rtype: int
        """
        return self._total_result_size

    @total_result_size.setter
    def total_result_size(self, total_result_size):
        """Sets the total_result_size of this InlineResponse2006.


        :param total_result_size: The total_result_size of this InlineResponse2006.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and total_result_size is None:  # noqa: E501
            raise ValueError("Invalid value for `total_result_size`, must not be `None`")  # noqa: E501

        self._total_result_size = total_result_size

    @property
    def data(self):
        """Gets the data of this InlineResponse2006.  # noqa: E501


        :return: The data of this InlineResponse2006.  # noqa: E501
        :rtype: list[Referral]
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this InlineResponse2006.


        :param data: The data of this InlineResponse2006.  # noqa: E501
        :type: list[Referral]
        """
        if self.local_vars_configuration.client_side_validation and data is None:  # noqa: E501
            raise ValueError("Invalid value for `data`, must not be `None`")  # noqa: E501

        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything with to_dict) so the
        # result contains only plain Python containers.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # NOTE(review): returns False (not NotImplemented) for foreign
        # types -- OpenAPI generator convention, kept as-is.
        if not isinstance(other, InlineResponse2006):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InlineResponse2006):
            return True

        return self.to_dict() != other.to_dict()
| 33.825503 | 647 | 0.624405 |
2e69639822990ba77ef76b36bc7df273573566e6 | 3,208 | py | Python | fn_scheduler/fn_scheduler/components/scheduled_rule_pause_resume.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2020-08-25T03:43:07.000Z | 2020-08-25T03:43:07.000Z | fn_scheduler/fn_scheduler/components/scheduled_rule_pause_resume.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2019-07-08T16:57:48.000Z | 2019-07-08T16:57:48.000Z | fn_scheduler/fn_scheduler/components/scheduled_rule_pause_resume.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from resilient_lib import ResultPayload
from fn_scheduler.components import SECTION_SCHEDULER
from fn_scheduler.lib.scheduler_helper import ResilientScheduler
class FunctionComponent(ResilientComponent):
    """Component implementing the Resilient functions 'scheduled_rule_pause'
    and 'scheduled_rule_resume': pause/resume an APScheduler job by its
    label."""
    def __init__(self, opts):
        """constructor provides access to the configuration options"""
        super(FunctionComponent, self).__init__(opts)
        self.options = opts.get(SECTION_SCHEDULER, {})

    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values"""
        self.options = opts.get(SECTION_SCHEDULER, {})

    @function("scheduled_rule_pause")
    def _scheduled_rule_pause_function(self, event, *args, **kwargs):
        """Function: Pause a scheduled rule

        Generator protocol: yields StatusMessage for progress and a final
        FunctionResult (or FunctionError on failure) back to the platform.
        """
        try:
            # Get the function parameters:
            scheduler_label = kwargs.get("scheduler_label")  # text

            log = logging.getLogger(__name__)
            log.info("scheduler_label: %s", scheduler_label)

            rc = ResultPayload(SECTION_SCHEDULER, **kwargs)

            job = self.find_job_by_label(scheduler_label)
            if job is None:
                raise KeyError("Job not found: {}".format(scheduler_label))

            job.pause()
            yield StatusMessage("Job paused: {}".format(scheduler_label))
            results = rc.done(True, ResilientScheduler.sanitize_job(job))

            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception:
            # Broad catch is the resilient-circuits convention: report the
            # failure to the platform instead of crashing the component.
            yield FunctionError()

    @function("scheduled_rule_resume")
    def _scheduled_rule_resume_function(self, event, *args, **kwargs):
        """Function: Resume a scheduled job

        Mirror image of _scheduled_rule_pause_function (calls job.resume()).
        """
        try:
            # Get the function parameters:
            scheduler_label = kwargs.get("scheduler_label")  # text

            log = logging.getLogger(__name__)
            log.info("scheduler_label: %s", scheduler_label)

            rc = ResultPayload(SECTION_SCHEDULER, **kwargs)

            job = self.find_job_by_label(scheduler_label)
            if job is None:
                raise KeyError("Job not found: {}".format(scheduler_label))

            job.resume()
            yield StatusMessage("Job resumed: {}".format(scheduler_label))
            results = rc.done(True, ResilientScheduler.sanitize_job(job))

            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception:
            yield FunctionError()

    def find_job_by_label(self, scheduler_label):
        """
        Find a scheduled job by its label (case-insensitive match on job id).
        :param scheduler_label: label/id of the job to look up
        :return: the matching job, or None when not found
        """
        scheduler = ResilientScheduler.get_scheduler()
        jobs = scheduler.get_jobs()
        for job in jobs:
            if job.id.lower() == scheduler_label.lower():
                return job

        return None
| 35.252747 | 114 | 0.644638 |
d68650fff76d8fe33b5db8606628d7484753a51e | 10,565 | py | Python | support/data_upload.py | ErinBryson/Robs_Data_Collector | 8f2de121ee821f09ba5ff274fc13258a9d571186 | [
"MIT"
] | null | null | null | support/data_upload.py | ErinBryson/Robs_Data_Collector | 8f2de121ee821f09ba5ff274fc13258a9d571186 | [
"MIT"
] | 6 | 2021-06-06T19:38:38.000Z | 2021-08-11T19:44:17.000Z | support/data_upload.py | ErinBryson/robs_data_collector | 8f2de121ee821f09ba5ff274fc13258a9d571186 | [
"MIT"
] | null | null | null | ########################################################################################################################
# FILE DETAILS #
# -------------------------------------------------------------------------------------------------------------------- #
# PROGRAM: Fixed Gas Data Aggregator #
# Author: Erin Bryson #
########################################################################################################################
########################################################################################################################
# Imports #
# -------------------------------------------------------------------------------------------------------------------- #
import json as js
import pandas as pd
import glob
from typing import Callable
########################################################################################################################
# GLOBAL FUNCTIONS #
# -------------------------------------------------------------------------------------------------------------------- #
def re_index_df(df):
    """Return *df* with a fresh 0..n-1 RangeIndex (old index discarded)."""
    reindexed = df.reset_index(drop=True)
    return reindexed
def add_data_row(export_df: pd.DataFrame, temp_data: pd.DataFrame):
    """Append *temp_data*'s rows below *export_df* (indexes kept as-is)."""
    combined = pd.concat([export_df, temp_data])
    return combined
def get_input_data_file_paths(input_data_dir, input_data_file_name) -> list:
    """Recursively collect every path under *input_data_dir* whose file
    name matches *input_data_file_name* (glob wildcards allowed)."""
    pattern = f'{input_data_dir}/**/{input_data_file_name}'
    return glob.glob(pattern, recursive=True)
########################################################################################################################
# CLASSES #
# -------------------------------------------------------------------------------------------------------------------- #
class Config:
    """Aggregator configuration loaded from a JSON file.

    Usage: assign a file path to ``config`` (parses the JSON), then call
    ``config_settings()`` to unpack the report-level fields.
    """
    def __init__(self):
        self._config = None              # parsed JSON dict (None until loaded)
        self.file_name = None
        self.report_name = None
        self.report_ext = None
        self.datasheets_metadata = None  # raw "data_sheets" sub-dict
        self.datasheets_names = []       # sheet names, in config order

    @property
    def config(self):
        """The parsed configuration dict (None before loading)."""
        return self._config

    @config.setter
    def config(self, config_file_name: str) -> None:
        """Load and parse the JSON config file at the given path."""
        with open(config_file_name) as config_file:
            self._config = js.load(config_file)

    def config_settings(self):
        """Unpack report name/extension and datasheet metadata from the
        loaded config.

        Safe to call repeatedly: the sheet-name list is rebuilt instead of
        appended to (previously every call duplicated the names).
        """
        self.report_name = self.config["report_name"]
        self.report_ext = self.config["report_ext"]
        self.datasheets_metadata = self.config["data_sheets"]
        # Store datasheet names (rebuild, don't extend)
        self.datasheets_names = list(self.datasheets_metadata.keys())

    def reset(self):
        """Restore the pristine (unloaded) state."""
        self.__init__()
class DataSheet:
    def __init__(self, name: str, sheet_dict: dict):
        """Wrap one entry of the config's "data_sheets" mapping.

        :param name: Excel sheet name (also the config key)
        :param sheet_dict: per-sheet settings (headers_bool, transpose_bool,
            data_map, optional headers_nm)
        """
        self.name = name
        self.sheet_dict = sheet_dict
@property
def header_bool(self):
bool_val = self.sheet_dict["headers_bool"]
if bool_val:
return True
return False
@property
def header(self):
"""This property sets 'header' parameter in read_excel() either to '0' (headers located on top row of excel
sheet) or 'None'(no headers used in excel sheet)"""
if self.header_bool:
return 0
return None
@property
def transpose_bool(self):
"""Store whether data in sheet should be transposed."""
if self.sheet_dict["transpose_bool"] == 1:
return True
return False
# If data filter columns if specified
@property
def use_cols(self):
if self.header_bool:
try:
return self.sheet_dict["headers_nm"]
except KeyError:
print("ERROR! Use Cols list missing from config file!")
return KeyError
return None
# Get row data map if specified
@property
def data_map(self):
return self.sheet_dict["data_map"]
@property
def data_map_keys(self):
data_map_keys = []
for key in self.data_map:
if key.isnumeric():
final_key = int(key)
else:
final_key = key
data_map_keys.append(final_key)
return data_map_keys
@property
def data_map_values(self):
data_map_values = []
for key in self.data_map:
data_map_values.append(self.data_map[key])
return data_map_values
def import_data(self, file_name) -> pd.DataFrame:
return pd.read_excel(file_name, sheet_name=self.name, header=self.header, usecols=self.use_cols)
def clean_data(self, data_df: pd.DataFrame) -> pd.DataFrame:
if self.use_cols is None:
use_col_num = 0
else:
use_col_num = len(self.use_cols)
# Drop NA values
data_df = data_df.dropna()
# Transpose data, if needed
if self.transpose_bool:
data_df = data_df.T.reset_index(drop=True)
if use_col_num != 1:
# Set column names to equal to row 1
data_df.columns = data_df.iloc[0]
# Filter out all undesired columns
data_df = data_df.filter(items=self.data_map_keys, axis=1)
# print(data_df)
data_df.columns = self.data_map_values
if use_col_num != 1:
# Drop the now-redundant first row
data_df = data_df.drop(index=0).reset_index(drop=True)
# Reset Index
data_df = data_df.reset_index(drop=True)
data_df.columns = self.data_map_values
return data_df
class DataImporter:
    """Walks a directory tree, imports every matching report workbook, and
    aggregates the per-sheet data into one DataFrame."""

    def __init__(self):
        self._config = None
        self._root_path = ""
        self.import_file_path_list = None
        self.data_map = {}
        self.agg_data_df_cols = []
        self.datasheet_object_list = []
        self.agg_data_df = None
        self.file_count = None

    @property
    def config(self):
        """The active Config object (assign before config_import_settings())."""
        return self._config

    @config.setter
    def config(self, config: "Config"):
        self._config = config

    @property
    def root_path(self):
        """Root directory searched recursively for report files."""
        return self._root_path

    @root_path.setter
    def root_path(self, root_path):
        self._root_path = root_path

    def config_import_settings(self):
        """Discover input files and build DataSheet objects from the config."""
        self.import_file_path_list = get_input_data_file_paths(
            self.root_path,
            f"{self.config.report_name}.{self.config.report_ext}"
        )
        self.file_count = len(self.import_file_path_list)
        # Populate the datasheet object list with constructed sheet objects
        for datasheet_name in self.config.datasheets_names:
            # Get metadata for this specific sheet
            datasheet_metadata = self.config.datasheets_metadata[datasheet_name]
            # Create a datasheet object for each datasheet
            sheet_obj = DataSheet(datasheet_name, datasheet_metadata)
            # Build the list of aggregated-data DataFrame columns
            for data_map_value in sheet_obj.data_map_values:
                self.agg_data_df_cols.append(data_map_value)
            self.datasheet_object_list.append(sheet_obj)
        self.agg_data_df = pd.DataFrame(columns=self.agg_data_df_cols)

    def concat_import_data(self, data_file_path):
        """Import every configured sheet from one workbook and append the
        combined row(s) to the aggregate DataFrame."""
        # NOTE: creating an empty DataFrame with one column allows the sheet
        # frames to be concatenated column-wise below (work-around).
        temp_df = pd.DataFrame({'temp_col': []})
        for datasheet_object in self.datasheet_object_list:
            sheet_df = datasheet_object.import_data(data_file_path)
            # Clean sheet data
            sheet_df = datasheet_object.clean_data(sheet_df)
            # Add the imported sheet data to the new data row
            temp_df = pd.concat([temp_df, sheet_df], axis=1)
            del sheet_df
        # Drop the temporary column used for the work-around
        temp_df = temp_df.drop(columns='temp_col')
        # Add the new data row
        self.agg_data_df = add_data_row(self.agg_data_df, temp_df)
        del temp_df

    def aggregate_data(self, status_message_func: Callable = None, download_button=None, progress_bar=None):
        """Import every discovered file, with optional GUI progress hooks.

        All three hooks are optional and only touched when supplied.
        """
        progress_num = 0
        if progress_bar:
            progress_bar['maximum'] = self.file_count
        for data_file_path in self.import_file_path_list:
            # Concat datasheet data
            self.concat_import_data(data_file_path)
            progress_num += 1
            # Pass the file number into the callable status-message hook
            if status_message_func:
                status_message_func(progress_num)
        # Re-index the aggregated data
        self.agg_data_df = re_index_df(self.agg_data_df)
        # BUG FIX: the original unconditionally did download_button['state'],
        # which raised TypeError whenever no button widget was supplied
        # (the parameter defaults to None).
        if download_button:
            download_button['state'] = 'active'

    def reset(self):
        """Restore the pristine state."""
        self.__init__()
class DataExporter:
    """Writes an aggregated DataFrame to disk, choosing the writer from the
    output file's extension (.xlsx or .csv)."""

    def __init__(self, export_df, export_data_full_file_name):
        self.export_df = export_df
        self.export_data_full_file_name = export_data_full_file_name
        # Find the position of the final "." to isolate the extension
        ext_start_pos = self.export_data_full_file_name.rfind(".")
        file_ext = self.export_data_full_file_name[ext_start_pos + 1:]
        if file_ext in self.supported_ext:
            self.file_ext = file_ext
        else:
            # BUG FIX: the original silently skipped the assignment for an
            # unsupported extension, causing a confusing AttributeError later
            # in export(); fail fast instead.
            raise ValueError(
                f"Unsupported export extension '{file_ext}'; "
                f"supported: {sorted(self.supported_ext)}")

    @property
    def supported_ext(self):
        """Maps each supported extension to the matching pandas writer."""
        return {
            "xlsx": self.export_df.to_excel,
            "csv": self.export_df.to_csv
        }

    def export(self):
        """Write the DataFrame to the configured output path."""
        self.supported_ext[self.file_ext](self.export_data_full_file_name)
########################################################################################################################
# END FILE #
########################################################################################################################
| 33.22327 | 120 | 0.51027 |
0fa47268d9d628afe49da8d4a996c283e29911ea | 12,996 | py | Python | multiagent-particle-envs/multiagent/environment.py | ScorpioPeng/maddpg | d4b109da6f4a21958b20825d37ade86568c48237 | [
"MIT"
] | null | null | null | multiagent-particle-envs/multiagent/environment.py | ScorpioPeng/maddpg | d4b109da6f4a21958b20825d37ade86568c48237 | [
"MIT"
] | null | null | null | multiagent-particle-envs/multiagent/environment.py | ScorpioPeng/maddpg | d4b109da6f4a21958b20825d37ade86568c48237 | [
"MIT"
] | null | null | null | import gym
from gym import spaces
from gym.envs.registration import EnvSpec
import numpy as np
from multiagent.multi_discrete import MultiDiscrete
# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
class MultiAgentEnv(gym.Env):
    """Gym environment wrapping a multi-agent particle ``world``.

    Per-agent action/observation spaces are built in ``__init__``; scenario
    behavior (reset/reward/observation/done/info) is injected via callbacks.
    NOTE(review): assumes ``world`` exposes ``policy_agents``, ``entities``,
    ``dim_p`` (physical dims) and ``dim_c`` (communication dims) — confirm
    against ``multiagent.core.World``.
    """
    metadata = {
        'render.modes' : ['human', 'rgb_array']
    }
    def __init__(self, world, reset_callback=None, reward_callback=None,
                 observation_callback=None, info_callback=None,
                 done_callback=None, shared_viewer=True):
        """Build per-agent action/observation spaces from the world's agents."""
        self.world = world
        self.agents = self.world.policy_agents
        # set required vectorized gym env property
        self.n = len(world.policy_agents)
        # scenario callbacks
        self.reset_callback = reset_callback
        self.reward_callback = reward_callback
        self.observation_callback = observation_callback
        self.info_callback = info_callback
        self.done_callback = done_callback
        # environment parameters
        self.discrete_action_space = True
        # if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
        self.discrete_action_input = False
        # if true, even the action is continuous, action will be performed discretely
        self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False
        # if true, every agent has the same reward
        self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False
        self.time = 0
        # configure spaces
        self.action_space = []
        self.observation_space = []
        for agent in self.agents:
            total_action_space = []
            # physical action space
            if self.discrete_action_space:
                u_action_space = spaces.Discrete(world.dim_p * 2 + 1)
            else:
                u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)
            if agent.movable:
                total_action_space.append(u_action_space)
            # communication action space
            if self.discrete_action_space:
                c_action_space = spaces.Discrete(world.dim_c)
            else:
                c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)
            if not agent.silent:
                total_action_space.append(c_action_space)
            # total action space
            if len(total_action_space) > 1:
                # all action spaces are discrete, so simplify to MultiDiscrete action space
                if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):
                    act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])
                else:
                    act_space = spaces.Tuple(total_action_space)
                self.action_space.append(act_space)
            else:
                self.action_space.append(total_action_space[0])
            # observation space
            obs_dim = len(observation_callback(agent, self.world))
            self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))
            agent.action.c = np.zeros(self.world.dim_c)
        # rendering
        self.shared_viewer = shared_viewer
        if self.shared_viewer:
            self.viewers = [None]
        else:
            self.viewers = [None] * self.n
        self._reset_render()
    def step(self, action_n):
        """Apply one action per agent, advance the world, and return
        per-agent (obs_n, reward_n, done_n, info_n) lists."""
        obs_n = []
        reward_n = []
        done_n = []
        info_n = {'n': []}
        self.agents = self.world.policy_agents
        # set action for each agent
        for i, agent in enumerate(self.agents):
            self._set_action(action_n[i], agent, self.action_space[i])
        # advance world state
        self.world.step()
        # record observation for each agent
        for agent in self.agents:
            obs_n.append(self._get_obs(agent))
            reward_n.append(self._get_reward(agent))
            done_n.append(self._get_done(agent))
            info_n['n'].append(self._get_info(agent))
        # all agents get total reward in cooperative case
        reward = np.sum(reward_n)
        if self.shared_reward:
            reward_n = [reward] * self.n
        return obs_n, reward_n, done_n, info_n
    def reset(self):
        """Reset the world via the scenario callback and return initial observations."""
        # reset world
        self.reset_callback(self.world)
        # reset renderer
        self._reset_render()
        # record observations for each agent
        obs_n = []
        self.agents = self.world.policy_agents
        for agent in self.agents:
            obs_n.append(self._get_obs(agent))
        return obs_n
    # get info used for benchmarking
    def _get_info(self, agent):
        if self.info_callback is None:
            return {}
        return self.info_callback(agent, self.world)
    # get observation for a particular agent
    def _get_obs(self, agent):
        if self.observation_callback is None:
            return np.zeros(0)
        return self.observation_callback(agent, self.world)
    # get dones for a particular agent
    # unused right now -- agents are allowed to go beyond the viewing screen
    def _get_done(self, agent):
        if self.done_callback is None:
            return False
        return self.done_callback(agent, self.world)
    # get reward for a particular agent
    def _get_reward(self, agent):
        if self.reward_callback is None:
            return 0.0
        return self.reward_callback(agent, self.world)
    # set env action for a particular agent
    def _set_action(self, action, agent, action_space, time=None):
        """Decode a raw (possibly MultiDiscrete) action into the agent's
        physical force ``action.u`` and communication vector ``action.c``.
        The decoded segments are consumed left-to-right; the trailing assert
        checks every segment was used."""
        agent.action.u = np.zeros(self.world.dim_p)
        agent.action.c = np.zeros(self.world.dim_c)
        # process action
        if isinstance(action_space, MultiDiscrete):
            # Split the flat action into one segment per sub-space.
            act = []
            size = action_space.high - action_space.low + 1
            index = 0
            for s in size:
                act.append(action[index:(index+s)])
                index += s
            action = act
        else:
            action = [action]
        if agent.movable:
            # physical action
            if self.discrete_action_input: #False
                agent.action.u = np.zeros(self.world.dim_p)
                # process discrete action
                if action[0] == 1: agent.action.u[0] = -1.0
                if action[0] == 2: agent.action.u[0] = +1.0
                if action[0] == 3: agent.action.u[1] = -1.0
                if action[0] == 4: agent.action.u[1] = +1.0
            else:
                if self.force_discrete_action:
                    # Snap a continuous vector to its one-hot argmax.
                    d = np.argmax(action[0])
                    action[0][:] = 0.0
                    action[0][d] = 1.0
                if self.discrete_action_space:
                    # One-hot layout: [noop, +x, -x, +y, -y] differences.
                    agent.action.u[0] += action[0][1] - action[0][2]
                    agent.action.u[1] += action[0][3] - action[0][4]
                else:
                    agent.action.u = action[0]
                sensitivity = 5.0
                if agent.accel is not None:
                    sensitivity = agent.accel
                agent.action.u *= sensitivity
            action = action[1:]
        if not agent.silent:
            # communication action
            if self.discrete_action_input:
                agent.action.c = np.zeros(self.world.dim_c)
                agent.action.c[action[0]] = 1.0
            else:
                agent.action.c = action[0]
            action = action[1:]
        # make sure we used all elements of action
        assert len(action) == 0
    # reset rendering assets
    def _reset_render(self):
        self.render_geoms = None
        self.render_geoms_xform = None
    # render environment
    def render(self, mode='human'):
        """Render all viewers; in 'human' mode also print agent-to-agent
        communication as letters. Returns one result per viewer
        (RGB arrays when mode == 'rgb_array')."""
        if mode == 'human':
            alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
            message = ''
            for agent in self.world.agents:
                comm = []
                for other in self.world.agents:
                    if other is agent: continue
                    if np.all(other.state.c == 0):
                        word = '_'
                    else:
                        word = alphabet[np.argmax(other.state.c)]
                    message += (other.name + ' to ' + agent.name + ': ' + word + ' ')
            print(message)
        for i in range(len(self.viewers)):
            # create viewers (if necessary)
            if self.viewers[i] is None:
                # import rendering only if we need it (and don't import for headless machines)
                #from gym.envs.classic_control import rendering
                from multiagent import rendering
                self.viewers[i] = rendering.Viewer(700,700)
        # create rendering geometry
        if self.render_geoms is None:
            # import rendering only if we need it (and don't import for headless machines)
            #from gym.envs.classic_control import rendering
            from multiagent import rendering
            self.render_geoms = []
            self.render_geoms_xform = []
            for entity in self.world.entities:
                geom = rendering.make_circle(entity.size)
                xform = rendering.Transform()
                if 'agent' in entity.name:
                    geom.set_color(*entity.color, alpha=0.5)
                else:
                    geom.set_color(*entity.color)
                # if 'boat' in entity.name:
                #     geom.set_color(*entity.color)
                geom.add_attr(xform)
                self.render_geoms.append(geom)
                self.render_geoms_xform.append(xform)
            # add geoms to viewer
            for viewer in self.viewers:
                viewer.geoms = []
                for geom in self.render_geoms:
                    viewer.add_geom(geom)
        results = []
        for i in range(len(self.viewers)):
            from multiagent import rendering
            # update bounds to center around agent
            cam_range = 1
            if self.shared_viewer:
                pos = np.zeros(self.world.dim_p)
            else:
                pos = self.agents[i].state.p_pos
            self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)
            # update geometry positions
            for e, entity in enumerate(self.world.entities):
                self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
            # render to display or array
            results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))
        return results
    # create receptor field locations in local coordinate frame
    def _make_receptor_locations(self, agent):
        """Return a list of 2-D offsets forming a polar receptive field
        (8 angles x 3 distances, plus the origin). The 'grid' branch is
        dead code while receptor_type is hard-coded to 'polar'."""
        receptor_type = 'polar'
        range_min = 0.05 * 2.0
        range_max = 1.00
        dx = []
        # circular receptive field
        if receptor_type == 'polar':
            for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
                for distance in np.linspace(range_min, range_max, 3):
                    dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))
            # add origin
            dx.append(np.array([0.0, 0.0]))
        # grid receptive field
        if receptor_type == 'grid':
            for x in np.linspace(-range_max, +range_max, 5):
                for y in np.linspace(-range_max, +range_max, 5):
                    dx.append(np.array([x,y]))
        return dx
# vectorized wrapper for a batch of multi-agent environments
# assumes all environments have the same observation and action space
class BatchMultiAgentEnv(gym.Env):
    """Flattens a list of MultiAgentEnv instances into one vectorized env.

    Assumes all sub-environments share the same observation/action spaces.
    """
    metadata = {
        'runtime.vectorized': True,
        'render.modes' : ['human', 'rgb_array']
    }

    def __init__(self, env_batch):
        self.env_batch = env_batch

    @property
    def n(self):
        # Total number of policy agents across all sub-environments.
        return np.sum([env.n for env in self.env_batch])

    @property
    def action_space(self):
        # Homogeneous sub-environments assumed (see class docstring).
        return self.env_batch[0].action_space

    @property
    def observation_space(self):
        return self.env_batch[0].observation_space

    def step(self, action_n, time):
        """Split the flat per-agent action list across sub-envs and step each.

        ``time`` is accepted for interface compatibility but unused:
        MultiAgentEnv.step() takes only the action list.
        """
        obs_n = []
        reward_n = []
        done_n = []
        info_n = {'n': []}
        i = 0
        for env in self.env_batch:
            # BUG FIX: the original passed ``time`` through to env.step(),
            # but MultiAgentEnv.step() accepts a single argument.
            obs, reward, done, _ = env.step(action_n[i:(i+env.n)])
            i += env.n
            obs_n += obs
            # reward = [r / len(self.env_batch) for r in reward]
            reward_n += reward
            done_n += done
        return obs_n, reward_n, done_n, info_n

    def reset(self):
        """Reset every sub-environment; return the flat observation list."""
        obs_n = []
        for env in self.env_batch:
            obs_n += env.reset()
        return obs_n

    # render environment
    def render(self, mode='human', close=True):
        """Render every sub-env; ``close`` kept only for API compatibility."""
        results_n = []
        for env in self.env_batch:
            # BUG FIX: MultiAgentEnv.render() takes only ``mode``; passing
            # ``close`` raised TypeError.
            results_n += env.render(mode)
        return results_n
| 38.449704 | 124 | 0.573176 |
99f3850abb8be151725bc753504a25abee5542b0 | 111,507 | py | Python | jax/_src/lax/control_flow.py | erwincoumans/jax | f0e55e3ce241f997b097bd9381504c08a9d35cf8 | [
"ECL-2.0",
"Apache-2.0"
] | 14 | 2021-04-24T03:26:39.000Z | 2022-01-28T14:25:13.000Z | jax/_src/lax/control_flow.py | erwincoumans/jax | f0e55e3ce241f997b097bd9381504c08a9d35cf8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/_src/lax/control_flow.py | erwincoumans/jax | f0e55e3ce241f997b097bd9381504c08a9d35cf8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Control flow primitives.
"""
import collections
import functools
import inspect
import itertools
import operator
import os
from typing import Any, Callable, Optional, Sequence, Tuple, TypeVar
import numpy as np
import jax
from jax._src import api
from jax import core
from jax._src import dtypes
from jax._src import source_info_util
from jax._src import util
from jax._src.lax import lax
from jax import linear_util as lu
from jax.core import ConcreteArray, ShapedArray, raise_to_shaped
from jax.api_util import flatten_fun_nokwargs
from jax.interpreters import ad
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.interpreters import batching
from jax.interpreters import masking
from jax.lib import xla_bridge as xb
from jax.lib import xla_client
from jax._src.traceback_util import api_boundary
from jax._src.util import (partial, unzip2, unzip3, safe_map, safe_zip,
split_list, cache, extend_name_stack)
from jax.tree_util import (tree_flatten, tree_unflatten, treedef_is_leaf,
treedef_children, treedef_tuple, tree_multimap,
tree_leaves, tree_structure)
from jax import ad_util
from jax.config import config
xops = xla_client.ops
_map = safe_map
zip = safe_zip
_reduce = functools.reduce
T = TypeVar('T')
Array = Any
@cache()
def _initial_style_open_jaxpr(fun: Callable, in_tree, in_avals,
                              primitive_name: Optional[str] = None):
  """Trace `fun` (with inputs flattened per `in_tree`) to an "open" jaxpr.

  Returns (jaxpr, consts, out_tree): a jaxpr whose constants are still held
  separately, the hoisted constant values, and the output pytree structure.
  Cached because control-flow primitives re-trace the same callables.
  """
  wrapped_fun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
  # Attach debug info so tracing errors can name the enclosing primitive.
  debug = pe.debug_info(fun, in_tree, False, primitive_name or "<unknown>")
  jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, in_avals, debug)
  return jaxpr, consts, out_tree()
@cache()
def _initial_style_jaxpr(fun: Callable, in_tree, in_avals,
                         primitive_name: Optional[str] = None):
  """Like `_initial_style_open_jaxpr`, but returns a ClosedJaxpr whose former
  constants have been converted into leading jaxpr arguments."""
  jaxpr, consts, out_tree = _initial_style_open_jaxpr(
      fun, in_tree, in_avals, primitive_name)
  closed_jaxpr = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())
  return closed_jaxpr, consts, out_tree
@cache()
def _initial_style_jaxprs_with_common_consts(
    funs: Sequence[Callable], in_tree, in_avals, primitive_name: str):
  """Stage several callables to ClosedJaxprs that share one constant signature.

  Returns (closed_jaxprs, consts, all_out_trees) where every jaxpr accepts the
  concatenation of *all* branches' constants (unused ones are ignored).
  """
  # When staging the branches of a conditional into jaxprs, constants are
  # extracted from each branch and converted to jaxpr arguments. To use the
  # staged jaxprs as the branches to a conditional *primitive*, we need for
  # their (input) signatures to match. This function "joins" the staged jaxprs:
  # for each one, it makes another that accepts *all* constants, but only uses
  # those that it needs (dropping the rest).
  jaxprs, all_consts, all_out_trees = \
      unzip3(_initial_style_open_jaxpr(fun, in_tree, in_avals, primitive_name)
             for fun in funs)
  newvar = core.gensym(jaxprs, suffix='_')
  all_const_avals = [[raise_to_shaped(core.get_aval(c)) for c in consts]
                     for consts in all_consts]
  # Fresh (unused) constvars standing in for the *other* branches' constants.
  unused_const_vars = [[newvar(aval) for aval in const_avals]
                       for const_avals in all_const_avals]
  def pad_jaxpr_constvars(i, jaxpr):
    # Sandwich branch i's own constvars between placeholders for the rest.
    prefix = util.concatenate(unused_const_vars[:i])
    suffix = util.concatenate(unused_const_vars[i + 1:])
    constvars = [*prefix, *jaxpr.constvars, *suffix]
    return core.Jaxpr(constvars=constvars, invars=jaxpr.invars,
                      outvars=jaxpr.outvars, eqns=jaxpr.eqns)
  consts = util.concatenate(all_consts)
  jaxprs = [pad_jaxpr_constvars(i, jaxpr) for i, jaxpr in enumerate(jaxprs)]
  closed_jaxprs = [core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())
                   for jaxpr in jaxprs]
  return closed_jaxprs, consts, all_out_trees
def _abstractify(x):
  """Return the shaped abstract value corresponding to the concrete value x."""
  aval = core.get_aval(x)
  return raise_to_shaped(aval)
def _typecheck_param(prim, param, name, msg_required, pred):
  """Assert `pred`, reporting a formatted type error for a primitive param."""
  header = (f'invalid {prim} param {name} of type {type(param).__name__}, '
            f'{msg_required} required:')
  param_str = str(param)
  # Keep multi-line param reprs on their own line(s).
  if os.linesep in param_str:
    sep = os.linesep
  else:
    sep = ' '
  core.typecheck_assert(pred, sep.join([header, param_str]))
### fori_loop and while_loop
def _fori_cond_fun(loop_carry):
  """Continue the fori_loop while the index is below its upper bound."""
  index, upper_bound, _unused_val = loop_carry
  return lax.lt(index, upper_bound)
@cache()
def _fori_body_fun(body_fun):
  """Wrap a fori_loop body so it advances an (i, upper, val) while_loop carry."""
  def while_body_fun(loop_carry):
    index, upper_bound, val = loop_carry
    next_index = lax.add(index, lax._const(index, 1))
    return next_index, upper_bound, body_fun(index, val)
  return while_body_fun
@cache()
def _fori_scan_body_fun(body_fun):
  """Wrap a fori_loop body so it advances an (i, val) scan carry."""
  def scanned_fun(loop_carry, _):
    index, val = loop_carry
    new_carry = (index + 1, body_fun(index, val))
    return new_carry, None
  return scanned_fun
@api_boundary
def fori_loop(lower, upper, body_fun, init_val):
  """Loop from ``lower`` to ``upper`` by reduction to :func:`jax.lax.while_loop`.

  The type signature in brief is

  .. code-block:: haskell

    fori_loop :: Int -> Int -> ((int, a) -> a) -> a -> a

  The semantics of ``fori_loop`` are given by this Python implementation::

    def fori_loop(lower, upper, body_fun, init_val):
      val = init_val
      for i in range(lower, upper):
        val = body_fun(i, val)
      return val

  Unlike that Python version, ``fori_loop`` is implemented in terms of either a
  call to :func:`jax.lax.while_loop` or a call to :func:`jax.lax.scan`. If the
  trip count is static (meaning known at tracing time, perhaps because ``lower``
  and ``upper`` are Python integer literals) then the ``fori_loop`` is
  implemented in terms of ``scan`` and reverse-mode autodiff is supported;
  otherwise, a ``while_loop`` is used and reverse-mode autodiff is not
  supported.  See those functions' docstrings for more information.

  Also unlike the Python analogue, the loop-carried value ``val`` must hold a
  fixed shape and dtype across all iterations (and not just be consistent up to
  NumPy rank/shape broadcasting and dtype promotion rules, for example). In
  other words, the type ``a`` in the type signature above represents an array
  with a fixed shape and dtype (or a nested tuple/list/dict container data
  structure with a fixed structure and arrays with fixed shape and dtype at the
  leaves).

  Args:
    lower: an integer representing the loop index lower bound (inclusive)
    upper: an integer representing the loop index upper bound (exclusive)
    body_fun: function of type ``(int, a) -> a``.
    init_val: initial loop carry value of type ``a``.

  Returns:
    Loop value from the final iteration, of type ``a``.
  """
  # TODO(phawkins): perhaps do more type checking here, better error messages.
  lower_dtype = dtypes.canonicalize_dtype(lax.dtype(lower))
  upper_dtype = dtypes.canonicalize_dtype(lax.dtype(upper))
  if lower_dtype != upper_dtype:
    msg = ("lower and upper arguments to fori_loop must have equal types, "
           "got {} and {}")
    raise TypeError(msg.format(lower_dtype.name, upper_dtype.name))
  # If we can specialize on the trip count, call scan instead of a while_loop
  # to enable efficient reverse-mode differentiation.
  if (isinstance(core.get_aval(lower), ConcreteArray) and
      isinstance(core.get_aval(upper), ConcreteArray)):
    try:
      # int() fails (TypeError) on abstract tracers inside e.g. custom dtypes,
      # in which case we fall back to the while_loop path below.
      lower_ = int(lower)
      upper_ = int(upper)
    except TypeError:
      use_scan = False
    else:
      use_scan = True
  else:
    use_scan = False
  if use_scan:
    # Static trip count: lower to scan carrying (index, val).
    (_, result), _ = scan(_fori_scan_body_fun(body_fun), (lower_, init_val),
                          None, length=upper_ - lower_)
  else:
    # Dynamic trip count: lower to while_loop carrying (index, upper, val).
    _, _, result = while_loop(_fori_cond_fun, _fori_body_fun(body_fun),
                              (lower, upper, init_val))
  return result
@api_boundary
def while_loop(cond_fun: Callable[[T], bool],
               body_fun: Callable[[T], T],
               init_val: T) -> T:
  """Call ``body_fun`` repeatedly in a loop while ``cond_fun`` is True.

  The type signature in brief is

  .. code-block:: haskell

    while_loop :: (a -> Bool) -> (a -> a) -> a -> a

  The semantics of ``while_loop`` are given by this Python implementation::

    def while_loop(cond_fun, body_fun, init_val):
      val = init_val
      while cond_fun(val):
        val = body_fun(val)
      return val

  Unlike that Python version, ``while_loop`` is a JAX primitive and is lowered
  to a single XLA While HLO. That makes it useful for reducing compilation times
  for jit-compiled functions, since native Python loop constructs in an ``@jit``
  function are unrolled, leading to large XLA computations.

  Also unlike the Python analogue, the loop-carried value ``val`` must hold a
  fixed shape and dtype across all iterations (and not just be consistent up to
  NumPy rank/shape broadcasting and dtype promotion rules, for example). In
  other words, the type ``a`` in the type signature above represents an array
  with a fixed shape and dtype (or a nested tuple/list/dict container data
  structure with a fixed structure and arrays with fixed shape and dtype at the
  leaves).

  Another difference from using Python-native loop constructs is that
  ``while_loop`` is not reverse-mode differentiable because XLA computations
  require static bounds on memory requirements.

  Args:
    cond_fun: function of type ``a -> Bool``.
    body_fun: function of type ``a -> a``.
    init_val: value of type ``a``, a type that can be a scalar, array, or any
      pytree (nested Python tuple/list/dict) thereof, representing the initial
      loop carry value.

  Returns:
    The output from the final iteration of body_fun, of type ``a``.
  """
  if config.jax_disable_jit:
    # Eager mode: just run the Python loop directly when possible.
    try:
      val = init_val
      while cond_fun(val):
        val = body_fun(val)
      return val
    except core.ConcretizationTypeError:
      # Can't run this while_loop in Python (e.g. because there's a vmap
      # transformation on it), so we fall back to the primitive version.
      pass
  def _create_jaxpr(init_val):
    # Stage cond/body to jaxprs and validate that cond returns a bool scalar.
    init_vals, in_tree = tree_flatten((init_val,))
    init_avals = tuple(_map(_abstractify, init_vals))
    cond_jaxpr, cond_consts, cond_tree = _initial_style_jaxpr(
        cond_fun, in_tree, init_avals, "while_cond")
    body_jaxpr, body_consts, body_tree = _initial_style_jaxpr(
        body_fun, in_tree, init_avals, "while_loop")
    if not treedef_is_leaf(cond_tree) or len(cond_jaxpr.out_avals) != 1:
      msg = "cond_fun must return a boolean scalar, but got pytree {}."
      raise TypeError(msg.format(cond_tree))
    pred_aval = cond_jaxpr.out_avals[0]
    if (not isinstance(pred_aval, ShapedArray)
        or pred_aval.strip_weak_type().strip_named_shape() != ShapedArray((), np.bool_)):
      msg = "cond_fun must return a boolean scalar, but got output type(s) {}."
      raise TypeError(msg.format(cond_jaxpr.out_avals))
    return init_vals, init_avals, body_jaxpr, in_tree, cond_jaxpr, cond_consts, body_consts, body_tree
  # The body input and output avals must match exactly. However, we want to account for
  # the case when init contains weakly-typed values (e.g. Python scalars), with avals that
  # may not match the output despite being compatible by virtue of their weak type.
  # To do this, we compute the jaxpr in two passes: first with the raw inputs, and if
  # necessary, a second time with modified init values.
  init_vals, init_avals, body_jaxpr, in_tree, *rest = _create_jaxpr(init_val)
  new_init_vals, changed = _promote_weak_typed_inputs(init_vals, init_avals, body_jaxpr.out_avals)
  if changed:
    new_init_val, = tree_unflatten(in_tree, new_init_vals)
    init_vals, init_avals, body_jaxpr, in_tree, *rest = _create_jaxpr(new_init_val)
  cond_jaxpr, cond_consts, body_consts, body_tree = rest
  in_tree_children = in_tree.children()
  assert len(in_tree_children) == 1
  # The body must map the carry type to itself (same pytree and avals).
  _check_tree_and_avals("body_fun output and input",
                        body_tree, body_jaxpr.out_avals,
                        in_tree_children[0], init_avals)
  outs = while_p.bind(*itertools.chain(cond_consts, body_consts, init_vals),
                      cond_nconsts=len(cond_consts), cond_jaxpr=cond_jaxpr,
                      body_nconsts=len(body_consts), body_jaxpr=body_jaxpr)
  return tree_unflatten(body_tree, outs)
def _while_loop_abstract_eval(*args, **kwargs):
  """A while loop's output avals are its body jaxpr's outputs, shaped."""
  body_jaxpr = kwargs["body_jaxpr"]
  return [raise_to_shaped(aval) for aval in body_jaxpr.out_avals]
def _while_loop_translation_rule(c, axis_env, name_stack, avals, backend, *args,
                                 cond_jaxpr, body_jaxpr, cond_nconsts, body_nconsts):
  """Lower the while primitive to an XLA While HLO.

  The carry tuple layout is (cond_consts, body_consts, loop_vals); consts are
  threaded through unchanged. If the cond output is non-scalar (batched via
  vmap), the predicate is OR-reduced and per-element selects keep finished
  lanes fixed.
  """
  cond_consts, body_consts, init_vals = split_list(args, [cond_nconsts, body_nconsts])
  # A non-scalar predicate means the loop was batched by vmap.
  batched = bool(cond_jaxpr.out_avals[0].shape)
  # Since jaxprs don't have tuples and have multiple return values, but we need
  # the HLO While loop to take a single tuple input and output a single boolean
  # (for the cond computation) or a single tuple output (for the body
  # computation), we build XLA computations that handle the tuple munging before
  # generating a Call into the computations formed from the jaxprs.
  init_carry = xops.Tuple(c, cond_consts + body_consts + init_vals)
  cond_c = xb.make_computation_builder("cond_computation")
  cond_carry = xb.parameter(cond_c, 0, c.get_shape(init_carry))
  cond_carry_elts = [xops.GetTupleElement(cond_carry, i) for i in range(len(args))]
  x, _, z = split_list(cond_carry_elts, [cond_nconsts, body_nconsts])
  pred, = xla.jaxpr_subcomp(cond_c, cond_jaxpr.jaxpr, backend, axis_env,
                            _map(partial(xb.constant, cond_c), cond_jaxpr.consts),
                            extend_name_stack(name_stack, 'cond'), *(x + z))
  if batched:
    # Keep looping while ANY batch element's predicate is still true.
    scalar = ShapedArray((), np.bool_)
    or_ = xla.primitive_subcomputation(lax.or_p, scalar, scalar)
    pred = xops.Reduce(cond_c, [pred], [xb.constant(cond_c, np.array(False))], or_,
                       list(range(cond_jaxpr.out_avals[0].ndim)))
  body_c = xb.make_computation_builder("body_computation")
  body_carry = xb.parameter(body_c, 0, c.get_shape(init_carry))
  body_carry_elts = [xops.GetTupleElement(body_carry, i) for i in range(len(args))]
  x, y, z = split_list(body_carry_elts, [cond_nconsts, body_nconsts])
  new_z = xla.jaxpr_subcomp(body_c, body_jaxpr.jaxpr, backend, axis_env,
                            _map(partial(xb.constant, body_c), body_jaxpr.consts),
                            extend_name_stack(name_stack, 'body'), *(y + z))
  if batched:
    # Re-evaluate the per-element predicate inside the body so finished batch
    # lanes keep their old carry values (select new vs. old per element).
    body_pred, = xla.jaxpr_subcomp(body_c, cond_jaxpr.jaxpr, backend, axis_env,
                                   _map(partial(xb.constant, body_c), cond_jaxpr.consts),
                                   extend_name_stack(name_stack, 'body_pred'), *(x + z))
    new_z = _map(partial(_pred_bcast_select, body_c, body_pred), new_z, z, body_jaxpr.out_avals)
    assert _map(body_c.get_shape, new_z) == _map(body_c.get_shape, z) # no broadcast
  new_carry = xops.Tuple(body_c, list(itertools.chain(x, y, new_z)))
  ans = xops.While(cond_c.build(pred), body_c.build(new_carry), init_carry)
  ans_elts = [xops.GetTupleElement(ans, i) for i in range(len(args))]
  _, _, z = split_list(ans_elts, [cond_nconsts, body_nconsts])
  # Only the loop values are returned; the consts were loop-invariant.
  return xops.Tuple(c, z)
def _pred_bcast_select(c, pred, x, y, x_y_aval: core.AbstractValue):
  """Select(pred, x, y) with `pred` broadcast up to x/y's shape.

  `pred`'s shape must be a prefix of x's/y's (the vmap batch dims). Units pass
  through unchanged and tokens are joined with AfterAll.
  """
  pred_shape = c.get_shape(pred).dimensions()
  x_shape = c.get_shape(x).dimensions()
  y_shape = c.get_shape(y).dimensions()
  assert x_shape == y_shape
  if x_y_aval is core.abstract_unit:
    return x
  elif x_y_aval is core.abstract_token:
    return xops.AfterAll(c, [x, y])
  else:
    assert pred_shape == x_shape[:len(pred_shape)] == y_shape[:len(pred_shape)]
    bcast_pred = xops.BroadcastInDim(pred, x_shape, list(range(len(pred_shape))))
    return xops.Select(bcast_pred, x, y)
def _while_loop_batching_rule(args, dims, axis_name, main_type,
                              cond_nconsts, cond_jaxpr,
                              body_nconsts, body_jaxpr):
  """Batching (vmap) rule for the `while` primitive.

  Computes a fixpoint over which carry elements are batched, batches the cond
  and body jaxprs accordingly, moves all batch dimensions to axis 0, and
  rebinds `while_p`.
  """
  # All mapped arguments must agree on a single batch size.
  size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}
  orig_batched = [d is not batching.not_mapped for d in dims]
  cconst_bat, bconst_bat, init_bat = split_list(orig_batched, [cond_nconsts, body_nconsts])
  # Fixpoint computation of which carry are batched: either
  # batched from init, or the carry out is batched. Each iteration promotes
  # at least one carry to batched. We need at most len(carry) iterations,
  # but we need one last iteration to prepare the jaxpr based on the final
  # carry_bat.
  carry_bat = init_bat
  for _ in range(1 + len(carry_bat)):
    batched = bconst_bat + carry_bat
    body_jaxpr_batched, carry_bat_out = batching.batch_jaxpr(
        body_jaxpr, size, batched, instantiate=carry_bat,
        axis_name=axis_name, main_type=main_type)
    cond_jaxpr_batched, (pred_bat,) = batching.batch_jaxpr(
        cond_jaxpr, size, cconst_bat + carry_bat,
        instantiate=bool(cond_jaxpr.out_avals[0].shape),
        axis_name=axis_name, main_type=main_type)
    # If the predicate is batched, every carry output is effectively batched,
    # since different batch elements may stop on different iterations.
    carry_bat_out = _map(partial(operator.or_, pred_bat), carry_bat_out)
    if carry_bat_out == carry_bat:
      break
    else:
      carry_bat = _map(operator.or_, carry_bat, carry_bat_out)
  else:
    assert False, "Fixpoint not reached"
  consts, init = split_list(args, [cond_nconsts + body_nconsts])
  const_dims, init_dims = split_list(dims, [cond_nconsts + body_nconsts])
  # Move every batched constant's batch dimension to the front.
  new_consts = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0
                else x for x, d in zip(consts, const_dims)]
  # Broadcast carries promoted by the fixpoint; move existing batch dims to 0.
  new_init = [batching.broadcast(x, size, 0) if now_bat and not was_bat
              else batching.moveaxis(x, d, 0) if now_bat and d != 0 else x
              for x, d, was_bat, now_bat in zip(init, init_dims, init_bat, carry_bat)]
  outs = while_p.bind(*(new_consts + new_init),
                      cond_nconsts=cond_nconsts, cond_jaxpr=cond_jaxpr_batched,
                      body_nconsts=body_nconsts, body_jaxpr=body_jaxpr_batched)
  out_bdims = [0 if b else batching.not_mapped for b in carry_bat]
  return outs, out_bdims
def _while_loop_jvp(primals, tangents, cond_nconsts, cond_jaxpr, body_nconsts,
                    body_jaxpr):
  """JVP (forward-mode autodiff) rule for the `while` primitive.

  Runs the tangent computation alongside the primal loop by extending the
  carry with tangent values; the cond jaxpr is augmented with unused inputs
  so its signature matches the enlarged carry.
  """
  nonzeros = [type(t) is not ad_util.Zero for t in tangents]
  cconst_nz, bconst_nz, init_nz = split_list(nonzeros, [cond_nconsts, body_nconsts])
  # Fixpoint over which carry tangents are nonzero: a carry tangent is
  # nonzero if it starts nonzero or if the body makes it nonzero.
  carry_nz = init_nz
  for _ in range(1 + len(carry_nz)):
    body_nonzeros = bconst_nz + carry_nz
    body_jvp, nonzeros_out = ad.jvp_jaxpr(
        body_jaxpr, body_nonzeros, instantiate=carry_nz)
    if nonzeros_out == carry_nz:
      break
    carry_nz = _map(operator.or_, carry_nz, nonzeros_out)
  else:
    assert False, "Fixpoint not reached"
  nonzeros = cconst_nz + body_nonzeros
  # Materialize concrete zeros for tangents the fixpoint marked nonzero.
  tangents = [ad.instantiate_zeros(t) if nz else t
              for t, nz in zip(tangents, nonzeros)]
  cconst, bconst, init = split_list(primals, [cond_nconsts, body_nconsts])
  _, bconst_dot, init_dot = split_list(tangents, [cond_nconsts, body_nconsts])
  bconst_dot = _prune_zeros(bconst_dot)
  init_dot = _prune_zeros(init_dot)
  num_carry = len(primals) - cond_nconsts - body_nconsts
  # Reorder the jvp jaxpr's binders into the (consts, carry) layout that the
  # while primitive expects, with tangents following their primals.
  body_jvp_rearranged = ad.rearrange_binders(
      body_jvp,
      [body_nconsts, num_carry], [len(bconst_dot), len(init_dot)],
      [num_carry], [len(init_dot)])
  # Give cond_jaxpr extra (ignored) inputs for the tangent part of the carry.
  newvar = core.gensym([cond_jaxpr.jaxpr])
  invars_aug = (
      cond_jaxpr.jaxpr.invars + [newvar(core.get_aval(x)) for x in init_dot])
  cond_jaxpr_augmented = core.Jaxpr(cond_jaxpr.jaxpr.constvars,
                                    invars_aug,
                                    cond_jaxpr.jaxpr.outvars,
                                    cond_jaxpr.jaxpr.eqns)
  cond_jaxpr_augmented = core.ClosedJaxpr(cond_jaxpr_augmented, cond_jaxpr.consts)
  out = while_p.bind(
      *(cconst + bconst + bconst_dot + init + init_dot),
      cond_nconsts=cond_nconsts,
      cond_jaxpr=cond_jaxpr_augmented,
      body_nconsts=len(bconst) + len(bconst_dot),
      body_jaxpr=body_jvp_rearranged)
  out_carry, out_carry_dot = split_list(out, [num_carry])
  out_tangents_iter = iter(out_carry_dot)
  # Reassemble the full tangent outputs, inserting symbolic zeros where the
  # tangent was pruned away.
  out_tangents = [next(out_tangents_iter) if nz else ad_util.Zero.from_value(p)
                  for p, nz in zip(out_carry, nonzeros_out)]
  return out_carry, out_tangents
def _while_partial_eval(trace: pe.JaxprTrace, *tracers: pe.Tracer, cond_nconsts: int,
                        cond_jaxpr: pe.ClosedJaxpr, body_nconsts: int,
                        body_jaxpr: pe.ClosedJaxpr) -> Sequence[pe.Tracer]:
  """An implementation of partial evaluation for while.

  As long as some carry (and hence output) are known and the output
  of `cond_jaxpr` is known, we use a portion of the loop body to compute the known
  outputs of the `while_loop`. For the unknown outputs we generate Jaxpr to run
  the whole while, including recomputing the known parts.

  This means that we don't actually save any computation by partial
  evaluation if there are unknown outputs.

  What this achieves is that we can give a proper error for reverse
  differentiation of `while`, because in that use of partial evaluation the
  primal inputs are considered "known", and only the tangent computation is
  unknown (see issue #2129).
  """
  unknowns = [not t.pval.is_known() for t in tracers]
  params = dict(cond_nconsts=cond_nconsts, cond_jaxpr=cond_jaxpr,
                body_nconsts=body_nconsts, body_jaxpr=body_jaxpr)
  cond_consts_uk, body_consts_uk, carry_init_uk = split_list(unknowns, [cond_nconsts, body_nconsts])
  # Fixpoint computation of unknown carry. Each iteration promotes
  # at least one carry to unknown. We need one last iteration to prepare the jaxpr.
  carry_uk = carry_init_uk
  for _ in range(1 + len(carry_uk)):
    body_jaxpr_known, _, carry_out_uk = pe.partial_eval_jaxpr(  # type: ignore
        body_jaxpr, body_consts_uk + carry_uk, instantiate=carry_uk)
    if carry_out_uk == carry_uk:
      break
    else:
      carry_uk = _map(operator.or_, carry_uk, carry_out_uk)
  else:
    assert False, "Fixpoint not reached"
  cond_jaxpr_known, _, cond_uk = pe.partial_eval_jaxpr(  # type: ignore
      cond_jaxpr, cond_consts_uk + carry_uk, instantiate=False)
  if cond_uk[0] or all([not uk for uk in unknowns]) or all(unknowns):
    # If conditional is unknown, or all inputs are known, or all are unknown,
    # just do the default processing.
    return trace.default_process_primitive(while_p, tracers, params)
  # Run the known part of the while. Prepare the inputs, as constants (if known), or
  # as core.unit.
  in_consts = [ core.unit if uk else t.pval.get_known()
                for uk, t in zip(cond_consts_uk + body_consts_uk + carry_uk,
                                 tracers)]
  # There should be no residuals for the cond_jaxpr_known
  assert 1 == len(cond_jaxpr_known.out_avals)
  # We ignore the residuals from the body_jaxpr_known, so the type of inputs matches
  # the type of outputs; residuals are at the end
  if len(body_jaxpr_known.out_avals) > len(body_jaxpr.out_avals):
    # TODO(necula): this is not quite enough; we should drop the residual computations also
    body_jaxpr_known.jaxpr.outvars = body_jaxpr_known.jaxpr.outvars[:len(body_jaxpr.out_avals)]
  out_known = while_p.bind(
      *in_consts,
      cond_nconsts=cond_nconsts,
      cond_jaxpr=cond_jaxpr_known,
      body_nconsts=body_nconsts,
      body_jaxpr=body_jaxpr_known)
  # Run the whole while_loop to get all the outputs, then merge with known ones
  out_all: Sequence[pe.Tracer] = trace.default_process_primitive(while_p, tracers, params)
  # Known outputs become known tracers; the unknown ones come straight from
  # the full while_loop run above.
  out_tracers: Sequence[pe.Tracer] = [
    out_unknown if uk
    else pe.JaxprTracer(trace, pe.PartialVal.known(known), out_unknown.recipe)
    for uk, out_unknown, known in zip(carry_uk, out_all, out_known)]
  return out_tracers
def _while_transpose_error(*_, **kwargs):
raise ValueError("Reverse-mode differentiation does not work for "
"lax.while_loop or lax.fori_loop. "
"Try using lax.scan instead.")
# Set up the `while` primitive and register its transformation rules:
# impl/abstract-eval, JVP, partial eval, XLA lowering, transpose (an error),
# and vmap batching.
while_p = lax.Primitive('while')
while_p.multiple_results = True
while_p.def_impl(partial(xla.apply_primitive, while_p))
while_p.def_abstract_eval(_while_loop_abstract_eval)
ad.primitive_jvps[while_p] = _while_loop_jvp
pe.custom_partial_eval_rules[while_p] = _while_partial_eval
xla.initial_style_translations[while_p] = _while_loop_translation_rule
ad.primitive_transposes[while_p] = _while_transpose_error
batching.initial_style_batchers[while_p] = _while_loop_batching_rule
### cond and switch
@api_boundary
def switch(index, branches: Sequence[Callable], operand):
  """Apply exactly one of ``branches`` given by ``index``.

  If ``index`` is out of bounds, it is clamped to within bounds.

  Has the semantics of the following Python::

    def switch(index, branches, operand):
      index = clamp(0, index, len(branches) - 1)
      return branches[index](operand)

  Args:
    index: Integer scalar type, indicating which branch function to apply.
    branches: Sequence of functions (A -> B) to be applied based on `index`.
    operand: Operand (A) input to whichever branch is applied.

  Returns:
    Value (B) of ``branches[index](operand)`` with ``index`` clamped into
    range.

  Raises:
    TypeError: if ``index`` is not an integer scalar.
    ValueError: if ``branches`` is empty.
  """
  if len(np.shape(index)) != 0:
    raise TypeError(
        f"Branch index must be scalar, "
        f"got {index} of shape {np.shape(index)}.")
  try:
    index_dtype = dtypes.result_type(index)
  except TypeError as err:
    msg = f"Index type must be an integer, got {index}."
    raise TypeError(msg) from err
  if index_dtype.kind not in 'iu':
    raise TypeError(
        f"Index type must be an integer, got {index} as {index_dtype}")
  branches = tuple(branches)
  if len(branches) == 0:
    raise ValueError("Empty branch sequence")
  elif len(branches) == 1:
    # A single branch needs no dispatch machinery.
    return branches[0](operand)
  index = lax.convert_element_type(index, np.int32)
  lo = np.array(0, np.int32)
  hi = np.array(len(branches) - 1, np.int32)
  index = lax.clamp(lo, index, hi)
  if (config.jax_disable_jit and
      isinstance(core.get_aval(index), ConcreteArray)):
    # With jit disabled and a concrete index, call the branch eagerly.
    return branches[int(index)](operand)
  ops, ops_tree = tree_flatten((operand,))
  ops_avals = tuple(_map(_abstractify, ops))
  jaxprs, consts, out_trees = _initial_style_jaxprs_with_common_consts(
      branches, ops_tree, ops_avals, primitive_name='switch')
  # All branches must return outputs of matching pytree structure and avals.
  for i, (out_tree, jaxpr) in enumerate(zip(out_trees[1:], jaxprs[1:])):
    _check_tree_and_avals(f"branch 0 and {i + 1} outputs",
                          out_trees[0], jaxprs[0].out_avals,
                          out_tree, jaxpr.out_avals)
  linear = (False,) * (len(consts) + len(ops))
  out = cond_p.bind(
      index, *consts, *ops, branches=tuple(jaxprs), linear=linear)
  return tree_unflatten(out_trees[0], out)
def _cond(pred, true_fun: Callable, false_fun: Callable, operand):
  """Conditionally apply ``true_fun`` or ``false_fun``.

  ``cond()`` has equivalent semantics to this Python implementation::

    def cond(pred, true_fun, false_fun, operand):
      if pred:
        return true_fun(operand)
      else:
        return false_fun(operand)

  ``pred`` must be a scalar type.

  Functions ``true_fun``/``false_fun`` may not need to refer to an ``operand``
  to compute their result, but one must still be provided to the ``cond`` call
  and be accepted by both the branch functions, e.g.::

    jax.lax.cond(
        get_predicate_value(),
        lambda _: 23,
        lambda _: 42,
        operand=None)

  Args:
    pred: Boolean scalar type, indicating which branch function to apply.
    true_fun: Function (A -> B), to be applied if ``pred`` is True.
    false_fun: Function (A -> B), to be applied if ``pred`` is False.
    operand: Operand (A) input to either branch depending on ``pred``. The type
      can be a scalar, array, or any pytree (nested Python tuple/list/dict)
      thereof.

  Returns:
    Value (B) of either ``true_fun(operand)`` or ``false_fun(operand)``,
    depending on the value of ``pred``. The type can be a scalar, array, or any
    pytree (nested Python tuple/list/dict) thereof.
  """
  if isinstance(pred, Sequence) or np.ndim(pred) != 0:
    raise TypeError(
        f"Pred must be a scalar, got {pred} of " +
        (f"type {type(pred)}" if isinstance(pred, Sequence)
         else f"shape {np.shape(pred)}."))
  try:
    pred_dtype = dtypes.result_type(pred)
  except TypeError as err:
    msg = ("Pred type must be either boolean or number, got {}.")
    raise TypeError(msg.format(pred)) from err
  if pred_dtype.kind != 'b':
    if pred_dtype.kind in 'iuf':
      # Numeric predicates count as True iff nonzero.
      pred = pred != 0
    else:
      msg = ("Pred type must be either boolean or number, got {}.")
      raise TypeError(msg.format(pred_dtype))
  if config.jax_disable_jit and isinstance(core.get_aval(pred), ConcreteArray):
    # With jit disabled and a concrete predicate, dispatch eagerly in Python.
    if pred:
      return true_fun(operand)
    else:
      return false_fun(operand)
  ops, ops_tree = tree_flatten((operand,))
  ops_avals = tuple(_map(_abstractify, ops))
  jaxprs, consts, out_trees = _initial_style_jaxprs_with_common_consts(
      (true_fun, false_fun), ops_tree, ops_avals, 'cond')
  true_jaxpr, false_jaxpr = jaxprs
  out_tree, false_out_tree = out_trees
  _check_tree_and_avals("true_fun and false_fun output",
                        out_tree, true_jaxpr.out_avals,
                        false_out_tree, false_jaxpr.out_avals)
  # Lower to the multi-branch cond primitive: branch 0 is false_fun and
  # branch 1 is true_fun, so the boolean pred converts to an int32 index.
  index = lax.convert_element_type(pred, np.int32)
  linear = (False,) * (len(consts) + len(ops))
  out = cond_p.bind(
      index, *consts, *ops,
      branches=(false_jaxpr, true_jaxpr), linear=linear)
  return tree_unflatten(out_tree, out)
@api_boundary
@functools.wraps(_cond)
def cond(*args, **kwargs):
  """Dispatches to `_cond`, accepting the deprecated per-branch-operand form."""
  # detect an attempt to call the former, deprecated cond
  try:
    ba = inspect.signature(_cond_with_per_branch_args).bind(*args, **kwargs)
  except TypeError:
    pass
  else:
    # The arguments bind to the old five-argument signature, so dispatch to
    # the backwards-compatibility shim.
    return _cond_with_per_branch_args(*ba.args)
  return _cond(*args, **kwargs)
def _cond_with_per_branch_args(pred,
                               true_operand, true_fun: Callable,
                               false_operand, false_fun: Callable):
  """Conditionally apply ``true_fun`` or ``false_fun``.

  Backwards-compatible form of ``cond`` in which each branch receives its own
  operand. Has equivalent semantics to this Python implementation::

    def cond(pred, true_operand, true_fun, false_operand, false_fun):
      if pred:
        return true_fun(true_operand)
      else:
        return false_fun(false_operand)

  Pred has to be a scalar type, collection types (list, tuple) are not supported
  """
  # Pack both operands together and have each branch select its own half.
  def run_true(operands):
    return true_fun(operands[0])

  def run_false(operands):
    return false_fun(operands[1])

  return _cond(pred, run_true, run_false, (true_operand, false_operand))
def _cond_abstract_eval(*args, **kwargs):
  """Abstract evaluation for `cond`: output avals come from branch 0."""
  first_branch = kwargs["branches"][0]
  return [raise_to_shaped(aval) for aval in first_branch.out_avals]
def _cond_translation_rule(c, axis_env, name_stack, avals, backend,
                           index, *args, branches, linear):
  """XLA translation rule for `cond`: lowers to an XLA Conditional op."""
  del linear  # Unused.

  def make_computation(name, jaxpr, op_shape):
    # Builds one branch subcomputation that takes a single tuple parameter
    # and returns a tuple of outputs.
    c = xb.make_computation_builder(name + '_comp')
    op = xb.parameter(c, 0, op_shape)
    ops = [xops.GetTupleElement(op, i) for i in range(len(jaxpr.in_avals))]
    outs = xla.jaxpr_subcomp(c, jaxpr.jaxpr, backend, axis_env,
                             _map(partial(xb.constant, c), jaxpr.consts),
                             extend_name_stack(name_stack, name + '_fun'), *ops)
    return c.build(xops.Tuple(c, outs))

  # Every branch receives the same tuple of operands.
  op = xops.Tuple(c, args)
  op_shape = c.get_shape(op)
  branch_computations = [
      make_computation(f'branch_{i}', jaxpr, op_shape)
      for i, jaxpr in enumerate(branches)]
  return xops.Conditional(index, branch_computations, [op] * len(branches))
def _select_tree(indices, branch_vals):
  """Selects `branch_vals[indices]` elementwise via a binary tree of selects.

  Recursively halves the branch list rather than chaining a linear sequence
  of selects.
  """
  assert len(branch_vals) > 0
  if len(branch_vals) == 1:
    return branch_vals[0]
  # NOTE: `mid` is used both as an operand to lax.lt and as a Python
  # list-slice index below.
  mid = lax._const(indices, len(branch_vals) // 2)
  return _bcast_select(lax.lt(indices, mid),
                       _select_tree(indices, branch_vals[:mid]),
                       _select_tree(indices - mid, branch_vals[mid:]))
def _bcast_select(pred, on_true, on_false):
  """Like lax.select, broadcasting `pred` up to `on_true`'s rank if needed."""
  pred_rank = np.ndim(pred)
  if pred_rank != np.ndim(on_true):
    # Map pred's dims onto the leading dims of on_true.
    leading_dims = list(range(pred_rank))
    pred = lax.broadcast_in_dim(pred, np.shape(on_true), leading_dims)
  return lax.select(pred, on_true, on_false)
def _cond_batching_rule(args, dims, axis_name, main_type, branches, linear):
  """Batching (vmap) rule for the `cond` primitive.

  If the branch index itself is batched, the cond is converted into running
  every branch and selecting among the results; otherwise only the operands
  are batched and the primitive is rebound.
  """
  # All mapped arguments must agree on a single batch size.
  size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}
  index, *ops = args
  index_dim, *op_dims = dims
  if index_dim is not batching.not_mapped:
    # Convert to a lax.select. While we could get away with not broadcasting
    # some operands yet, because all outputs must be broadcast together anyway
    # for the select we broadcast the input operands for simplicity and leave
    # optimizations to XLA.
    # TODO(mattjj,frostig): assumes branches are side-effect-free, revise!
    index, *ops = [batching.bdim_at_front(x, d, size) for x, d in zip(args, dims)]
    branches_batched = [
      batching.batch_jaxpr(jaxpr, size, [True] * len(ops), True, axis_name, main_type)[0]
      for jaxpr in branches]
    branch_outs = []
    for i, jaxpr in enumerate(branches_batched):
      # Perform a select on the inputs for safety of reverse-mode autodiff; see
      # https://github.com/google/jax/issues/1052
      ops_ = [_bcast_select(lax.eq(index, lax._const(index, i)),
                            x, lax.stop_gradient(x))
              if x is not core.unit else x for x in ops]
      branch_outs.append(core.jaxpr_as_fun(jaxpr)(*ops_))
    # Pick each output elementwise from the branch selected by `index`.
    out = [_select_tree(index, outs) if outs[0] is not core.unit else outs[0]
           for outs in zip(*branch_outs)]
    return out, [0] * len(branch_outs[0])
  else:
    # Index is unbatched: batch the operands, compute which outputs are
    # batched in any branch, then rebatch all branches to that common set.
    ops_bat = [d is not batching.not_mapped for d in op_dims]
    ops = [batching.moveaxis(x, d, 0) if b else x
           for b, x, d in zip(ops_bat, ops, op_dims)]
    branches_out_bat = [
      batching.batch_jaxpr(jaxpr, size, ops_bat, False, axis_name, main_type)[1]
      for jaxpr in branches]
    out_bat = [any(bat) for bat in zip(*branches_out_bat)]
    branches_batched = tuple(
      batching.batch_jaxpr(jaxpr, size, ops_bat, out_bat, axis_name, main_type)[0]
      for jaxpr in branches)
    out_dims = [0 if b else batching.not_mapped for b in out_bat]
    out = cond_p.bind(
        index, *ops, branches=branches_batched, linear=linear)
    return out, out_dims
def _cond_jvp(primals, tangents, branches, linear):
  """JVP (forward-mode autodiff) rule for the `cond` primitive."""
  nonzeros = [type(t) is not ad_util.Zero for t in tangents]
  index_nz, *ops_nz = nonzeros
  # The branch index is not differentiable.
  assert index_nz is False
  # An output tangent is nonzero if it is nonzero in any branch; instantiate
  # all branches to that common nonzero set so their signatures agree.
  branches_out_nz = [ad.jvp_jaxpr(jaxpr, ops_nz, instantiate=False)[1]
                     for jaxpr in branches]
  out_nz = [any(nz) for nz in zip(*branches_out_nz)]
  branches_jvp = tuple(ad.jvp_jaxpr(jaxpr, ops_nz, instantiate=out_nz)[0]
                       for jaxpr in branches)
  index, *ops = primals
  _, *ops_dot = tangents
  ops_dot = _prune_zeros(ops_dot)
  ops_lin = tuple(linear)
  # Tangent inputs are linear by construction.
  linear_jvp = ops_lin + (True,) * len(ops_dot)
  out = cond_p.bind(
      index, *ops, *ops_dot, branches=branches_jvp, linear=linear_jvp)
  out_primals, out_tangents = split_list(out, [len(out_nz)])
  out_tangents_iter = iter(out_tangents)
  # Re-insert symbolic zeros where tangents were pruned.
  out_tangents = [next(out_tangents_iter) if nz else ad_util.Zero.from_value(p)
                  for p, nz in zip(out_primals, out_nz)]
  return out_primals, out_tangents
def _cond_partial_eval(trace, *tracers, branches, linear):
  """Partial-evaluation rule for the `cond` primitive.

  Splits each branch into a known part (evaluated now, producing residuals)
  and an unknown part (staged out). Residuals are merged across branches so
  both resulting conds are well-typed; see the comments preceding
  `_merge_branch_residuals` below.
  """
  unknowns = [t.pval[0] is not None for t in tracers]
  index_uk, *ops_uk = unknowns
  if index_uk:
    # When the branch index is unknown, we stage out the whole cond.
    params = dict(branches=branches, linear=linear)
    return trace.default_process_primitive(cond_p, tracers, params)
  # An output is unknown if it is unknown in any branch.
  branches_out_uks = []
  for branch_jaxpr in branches:
    _, _, out_uks = pe.partial_eval_jaxpr(branch_jaxpr, ops_uk, instantiate=False)
    branches_out_uks.append(out_uks)
  out_uks = [any(uks) for uks in zip(*branches_out_uks)]
  # Split each branch into its known (evaluated) and staged (unknown) parts.
  branches_1, branches_2, branch_res_avals = [], [], []
  for branch_jaxpr in branches:
    branch_jaxpr_1, branch_jaxpr_2, _ = pe.partial_eval_jaxpr(
        branch_jaxpr, ops_uk, instantiate=out_uks)
    branch_num_res = len(branch_jaxpr_1.out_avals) - len(out_uks)
    # move residuals to the front
    move = [False] * len(ops_uk) + [True] * branch_num_res
    branch_jaxpr_2 = pe.move_binders_to_front(branch_jaxpr_2, move)
    # TODO(frostig,mattjj): pe.partial_eval_jaxpr should raise to shaped avals
    res_avals = _map(
        raise_to_shaped, branch_jaxpr_2.in_avals[:branch_num_res])
    branches_1.append(branch_jaxpr_1)
    branches_2.append(branch_jaxpr_2)
    branch_res_avals.append(res_avals)
  branches_1 = tuple(branches_1)
  branches_2 = tuple(branches_2)
  for jaxpr in branches_2[1:]:
    assert len(jaxpr.out_avals) == len(branches_2[0].out_avals)
  num_outs = len(branches_2[0].out_avals)
  # Merge per-branch residuals into one common format so the two conds below
  # have identical branch signatures.
  all_res_avals, res_avals_per_branch = _merge_branch_residuals(
      branch_res_avals)
  branches_1 = _join_cond_outputs(
      branches_1, all_res_avals, res_avals_per_branch, num_outs)
  branches_2 = _join_cond_pe_staged_jaxpr_inputs(
      branches_2, all_res_avals, res_avals_per_branch)
  # TODO(frostig,mattjj): reinstate this assertion once pe.partial_eval_jaxpr
  # raises to shaped avals
  # for j in branches_1[1:]:
  #   assert j.out_avals == branches_1[0].out_avals
  num_res = len(all_res_avals)
  # Evaluate the known parts now, producing known outputs plus residuals.
  _, in_consts = unzip2([t.pval for t in tracers])
  out_consts_res = cond_p.bind(*in_consts, branches=branches_1, linear=linear)
  out_consts, res = split_list(out_consts_res, [len(out_consts_res) - num_res])
  # TODO(frostig,mattjj): remove raised_to_shaped of avals once
  # pe.partial_eval_jaxpr handles it
  out_avals = _map(raise_to_shaped, branches_2[0].out_avals)
  out_pvs = [aval if uk else None for aval, uk in zip(out_avals, out_uks)]
  # Stage out the unknown parts as an equation on the residuals and the
  # unknown operands.
  index_tracer = trace.instantiate_const(tracers[0])
  ops_tracers = [trace.instantiate_const(t) if uk
                 else trace.new_instantiated_literal(core.unit)
                 for uk, t in zip(unknowns[1:], tracers[1:])]
  res_tracers = _map(trace.new_instantiated_const, res)
  out_tracers = [pe.JaxprTracer(trace, pe.PartialVal((pv, const)), None)
                 for pv, const in zip(out_pvs, out_consts)]
  linear_2 = (False,) * num_res + linear
  params = dict(branches=branches_2, linear=linear_2)
  eqn = pe.new_eqn_recipe(
      [index_tracer] + res_tracers + ops_tracers, out_tracers, cond_p, params,
      source_info_util.current())
  for t in out_tracers: t.recipe = eqn
  return out_tracers
# When partially evaluating conditionals, each branch produces residuals
# depending on the computation carried out by the branch, and a corresponding
# staged jaxpr that accepts those residuals as its first few inputs. The
# residual-producing branches are staged as jaxprs and bound right away in a
# conditional. The residual-consuming jaxprs are assembled together in a jaxpr
# conditional. The following helper functions ensure that both collections of
# jaxprs (those evaluated and those staged) are valid for joint use under their
# respective conditionals.
#
# In particular, the residuals derived from each original branch may have
# distinct types. Because the branches of conditionals must have identical type
# signatures, we join residuals together across branches into a common format.
# In order to set up a type signature that all branches can conform to, it would
# suffice to concatenate all branches' residuals. But concatenation can result
# in redundant inputs and outputs, and might lead to memory allocation that
# scales unnecessarily with the branch count. The function below,
# `_merge_branch_residuals`, finds common residual types across branches for
# reuse, so as to avoid redundant allocation. It returns a list L of types
# (avals) representing the collection of residuals merged according to type,
# and, for each branch, a lookup table to match its residuals to their
# positions/types in L. Example input/output:
#
# [x], [y], [x, x] -> [x, y, x], [[0], [1], [0, 2]]
# [x], [x], [x, x] -> [x, x], [[0], [0], [0, 1]]
# [y, x, x], [x, z, y], [z, x] -> [y, x, x, z], [[0, 1, 2], [1, 3, 0], [3, 1]]
def _merge_branch_residuals(branch_res_avals):
  """Merges residual avals of all branches into one common list.

  Returns a pair: the merged list of avals, and per-branch index lists mapping
  each branch's residuals to positions in the merged list. See the comment
  above for examples.
  """
  def enumerate_equal(xs):
    # Tag each element with its occurrence count so that repeated equal avals
    # within a single branch remain distinct during merging.
    counts = {v: itertools.count() for v in set(xs)}
    return [(x, next(counts[x])) for x in xs]
  branch_res_tagged_avals = _map(enumerate_equal, branch_res_avals)
  all_tagged_avals = _ordered_unique(util.concatenate(branch_res_tagged_avals))
  indices = {v: i for i, v in enumerate(all_tagged_avals)}
  branch_indices = [
      [indices[aval] for aval in avals] for avals in branch_res_tagged_avals]
  all_avals = [x for x, _ in all_tagged_avals]
  return all_avals, branch_indices
# This function augments branch outputs to agree with the merged residual
# format: each branch is made to return zero-filled values in the places of
# residual outputs that it does not populate.
def _join_cond_outputs(jaxprs, all_res_avals, res_aval_indices_per_jaxpr,
                       num_non_res_outputs):
  """Pads each branch jaxpr's residual outputs to the merged residual format.

  Residual slots that a branch does not populate are filled with zeros, so all
  branches return the same output types.
  """
  def augment_jaxpr(jaxpr, res_indices):
    @lu.wrap_init
    def f_aug(*args):
      outs_and_residuals = core.jaxpr_as_fun(jaxpr)(*args)
      outs, residuals = split_list(outs_and_residuals, [num_non_res_outputs])
      # Zero-fill every merged residual slot, then scatter this branch's
      # actual residuals into their assigned positions.
      aug_residuals = _map(ad_util.zeros_like_aval, all_res_avals)
      aug_residuals = util.subvals(aug_residuals, zip(res_indices, residuals))
      return outs + list(aug_residuals)

    return _make_closed_jaxpr(f_aug, jaxpr.in_avals)

  return tuple(_map(augment_jaxpr, jaxprs, res_aval_indices_per_jaxpr))
# This function augments branch inputs to agree with the merged residual format:
# each branch is made to accept all residuals, even though it will ignore those
# that it does not read.
def _join_cond_pe_staged_jaxpr_inputs(jaxprs, all_res_avals,
                                      res_aval_indices_per_jaxpr):
  """Pads each staged branch jaxpr to accept the full merged residual list.

  Each branch's own residual invars are placed at their assigned positions;
  the remaining residual inputs are fresh variables the branch ignores.
  """
  newvar = core.gensym([j.jaxpr for j in jaxprs], suffix='_')
  all_res_vars = _map(newvar, all_res_avals)

  def augment_jaxpr(jaxpr, res_indices):
    num_res = len(res_indices)
    # Residuals come first in the staged jaxpr's invars (see the caller's
    # move_binders_to_front step).
    res_vars = jaxpr.jaxpr.invars[:num_res]
    non_res_vars = jaxpr.jaxpr.invars[num_res:]
    aug_res_vars = list(util.subvals(all_res_vars, zip(res_indices, res_vars)))
    aug_invars = aug_res_vars + non_res_vars
    jaxpr_aug = core.Jaxpr(jaxpr.jaxpr.constvars, aug_invars,
                           jaxpr.jaxpr.outvars, jaxpr.jaxpr.eqns)
    jaxpr_aug = core.ClosedJaxpr(jaxpr_aug, jaxpr.consts)
    return jaxpr_aug

  return tuple(_map(augment_jaxpr, jaxprs, res_aval_indices_per_jaxpr))
def _ordered_unique(xs):
d = collections.OrderedDict((x, None) for x in xs)
return list(d.keys())
def _transpose_cond_jaxpr(jaxpr, num_res):
  """Transposes one cond branch jaxpr.

  The transposed jaxpr takes (residuals, output cotangents) and returns
  cotangents for the non-residual (linear) inputs.
  """
  res_avals, primal_avals = split_list(jaxpr.in_avals, [num_res])
  primal_avals = _map(raise_to_shaped, primal_avals)

  @lu.wrap_init
  def transposed(*args):
    res, cts_out = split_list(args, [num_res])
    primals = res + [ad.UndefinedPrimal(aval) for aval in primal_avals]
    cts_in = ad.backward_pass(
        jaxpr.jaxpr, jaxpr.consts, primals, cts_out)
    # Drop cotangents for the residual inputs; only the linear inputs'
    # cotangents are returned.
    _, cts_in = split_list(cts_in, [num_res])
    return _map(ad.instantiate_zeros_aval, primal_avals, cts_in)

  return _make_closed_jaxpr(transposed, res_avals + jaxpr.out_avals)
def _cond_transpose(cts, *args, branches, linear):
  """Transpose rule for `cond`: transposes every branch and rebinds."""
  index, *ops = args
  in_avals = _map(raise_to_shaped, branches[0].in_avals)
  # Non-linear operands are residuals; the code below treats the first
  # `num_res` operands as the residuals.
  num_res = len(ops) - sum(linear)
  branches_trans = tuple(
      _transpose_cond_jaxpr(jaxpr, num_res) for jaxpr in branches)
  lin_in_avals = [raise_to_shaped(a, weak_type=False)
                  for a, l in zip(in_avals, linear) if l]
  assert all(core.typematch(out_aval, lin_in_aval)
             for jaxpr in branches_trans
             for out_aval, lin_in_aval in zip(jaxpr.out_avals, lin_in_avals))
  res = ops[:num_res]
  cts = _map(ad.instantiate_zeros_aval, branches[0].out_avals, cts)
  linear_trans = (False,) * num_res + (True,) * len(cts)
  out = cond_p.bind(
      index, *res, *cts, branches=branches_trans, linear=linear_trans)
  assert all(_map(core.typecheck, lin_in_avals, out))
  # Scatter the computed cotangents back to the linear input positions; the
  # index and residual inputs get None.
  out_iter = iter(out)
  out = [next(out_iter) if l else None for l in linear]
  assert next(out_iter, None) is None
  return [None] + out
def _avals_short(avals):
  """Formats `avals` as a space-separated string of short descriptions."""
  def describe(aval):
    # Prefer the aval's own short formatter; fall back to plain str().
    str_short = getattr(aval, 'str_short', None)
    return str_short() if str_short is not None else str(aval)
  return ' '.join(describe(aval) for aval in avals)
def _cond_typecheck(*avals, branches, linear):
  """Typechecks a `cond` application.

  Checks the primitive parameters, that every branch agrees with branch 0 on
  input/output arity and types, and that the operand avals (after the int32
  index) are compatible with the branch inputs.
  """
  tc = partial(_typecheck_param, 'cond')
  tc(branches, 'branches', 'tuple of ClosedJaxpr',
     type(branches) is tuple and
     all(type(x) is core.ClosedJaxpr for x in branches))
  tc(linear, 'linear', 'tuple of bool',
     type(linear) is tuple and all(type(x) is bool for x in linear))
  core.typecheck_assert(
      len(branches) > 0,
      'cond requires at least one branch function')
  core.typecheck_assert(
      len(linear) + 1 == len(avals),
      f'cond given {len(linear)} linear flags for '
      f'{len(avals) - 1} non-predicate operands')
  jaxpr0 = branches[0]
  jaxpr0_in_avals_str = _avals_short(jaxpr0.in_avals)
  jaxpr0_out_avals_str = _avals_short(jaxpr0.out_avals)
  # Every branch must match branch 0's signature exactly.
  for i, jaxpr in enumerate(branches[1:]):
    core.typecheck_assert(
        len(jaxpr0.in_avals) == len(jaxpr.in_avals),
        f'cond branch 0 takes {len(jaxpr0.in_avals)} inputs, '
        f'branch {i+1} takes {len(jaxpr.in_avals)}')
    core.typecheck_assert(
        len(jaxpr0.out_avals) == len(jaxpr.out_avals),
        f'cond branch 0 outputs {len(jaxpr0.out_avals)} values, '
        f'branch {i+1} outputs {len(jaxpr.out_avals)}')
    core.typecheck_assert(
        all(_map(core.typematch, jaxpr0.in_avals, jaxpr.in_avals)),
        f'cond branches 0 and {i+1} have mismatching input types: '
        f'{jaxpr0_in_avals_str} vs {_avals_short(jaxpr.in_avals)}')
    core.typecheck_assert(
        all(_map(core.typematch, jaxpr0.out_avals, jaxpr.out_avals)),
        f'cond branches 0 and {i+1} have mismatching output types: '
        f'{jaxpr0_out_avals_str} vs {_avals_short(jaxpr.out_avals)}')
  core.typecheck_assert(
      len(avals) == 1 + len(jaxpr0.in_avals),
      f'cond called with {len(avals) - 1} non-predicate operands, '
      f'but branches take {len(jaxpr0.in_avals)} inputs')
  index_aval, *op_avals = avals
  core.typecheck_assert(
      index_aval.dtype == np.int32,
      f'cond called with index of type {index_aval.dtype} instead of int32')
  core.typecheck_assert(
      all(_map(core.typecompat, jaxpr0.in_avals, op_avals)),
      f'cond branches take input types {jaxpr0_in_avals_str}, '
      f'called with operands of type {_avals_short(op_avals)}')
def cond_bind(*args, branches, linear):
  """Custom bind for `cond_p` that typechecks when jax_enable_checks is set."""
  if config.jax_enable_checks:
    avals = _map(core.get_aval, args)
    _cond_typecheck(*avals, branches=branches, linear=linear)
    for jaxpr in branches:
      core.check_jaxpr(jaxpr.jaxpr)
  return core.Primitive.bind(cond_p, *args, branches=branches, linear=linear)
# Set up the `cond` primitive and register its transformation rules:
# impl/abstract-eval, custom bind (typechecking), JVP, transpose, partial
# eval, vmap batching, XLA lowering, and the jaxpr typechecker.
cond_p = lax.Primitive('cond')
cond_p.multiple_results = True
cond_p.def_impl(partial(xla.apply_primitive, cond_p))
cond_p.def_abstract_eval(_cond_abstract_eval)
cond_p.def_custom_bind(cond_bind)
ad.primitive_jvps[cond_p] = _cond_jvp
ad.primitive_transposes[cond_p] = _cond_transpose
pe.custom_partial_eval_rules[cond_p] = _cond_partial_eval
batching.initial_style_batchers[cond_p] = _cond_batching_rule
xla.initial_style_translations[cond_p] = _cond_translation_rule
core.custom_typechecks[cond_p] = _cond_typecheck
### scan
# Type variables for the scan signature: the carry type, the per-iteration
# input slice type, and the per-iteration output slice type.
Carry = TypeVar('Carry')
X = TypeVar('X')
Y = TypeVar('Y')
@api_boundary
def scan(f: Callable[[Carry, X], Tuple[Carry, Y]],
         init: Carry,
         xs: X,
         length: Optional[int] = None,
         reverse: bool = False,
         unroll: int = 1) -> Tuple[Carry, Y]:
  """Scan a function over leading array axes while carrying along state.

  The type signature in brief is

  .. code-block:: haskell

    scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])

  where we use [t] here to denote the type t with an additional leading axis.
  That is, if t is an array type then [t] represents the type with an additional
  leading axis, and if t is a pytree (container) type with array leaves then [t]
  represents the type with the same pytree structure and corresponding leaves
  each with an additional leading axis.

  When ``a`` is an array type or None, and ``b`` is an array type, the semantics
  of ``scan`` are given roughly by this Python implementation::

    def scan(f, init, xs, length=None):
      if xs is None:
        xs = [None] * length
      carry = init
      ys = []
      for x in xs:
        carry, y = f(carry, x)
        ys.append(y)
      return carry, np.stack(ys)

  Unlike that Python version, both ``a`` and ``b`` may be arbitrary pytree
  types, and so multiple arrays can be scanned over at once and produce multiple
  output arrays. (None is actually an empty pytree.)

  Also unlike that Python version, ``scan`` is a JAX primitive and is lowered to
  a single XLA While HLO. That makes it useful for reducing compilation times
  for jit-compiled functions, since native Python loop constructs in an ``@jit``
  function are unrolled, leading to large XLA computations.

  Finally, the loop-carried value ``carry`` must hold a fixed shape and dtype
  across all iterations (and not just be consistent up to NumPy rank/shape
  broadcasting and dtype promotion rules, for example). In other words, the type
  ``c`` in the type signature above represents an array with a fixed shape and
  dtype (or a nested tuple/list/dict container data structure with a fixed
  structure and arrays with fixed shape and dtype at the leaves).

  Args:
    f: a Python function to be scanned of type ``c -> a -> (c, b)``, meaning
      that ``f`` accepts two arguments where the first is a value of the loop
      carry and the second is a slice of ``xs`` along its leading axis, and that
      ``f`` returns a pair where the first element represents a new value for
      the loop carry and the second represents a slice of the output.
    init: an initial loop carry value of type ``c``, which can be a scalar,
      array, or any pytree (nested Python tuple/list/dict) thereof, representing
      the initial loop carry value. This value must have the same structure as
      the first element of the pair returned by ``f``.
    xs: the value of type ``[a]`` over which to scan along the leading axis,
      where ``[a]`` can be an array or any pytree (nested Python
      tuple/list/dict) thereof with consistent leading axis sizes.
    length: optional integer specifying the number of loop iterations, which
      must agree with the sizes of leading axes of the arrays in ``xs`` (but can
      be used to perform scans where no input ``xs`` are needed).
    reverse: optional boolean specifying whether to run the scan iteration
      forward (the default) or in reverse, equivalent to reversing the leading
      axes of the arrays in both ``xs`` and in ``ys``.
    unroll: optional positive int specifying, in the underlying operation of the
      scan primitive, how many scan iterations to unroll within a single
      iteration of a loop.

  Returns:
    A pair of type ``(c, [b])`` where the first element represents the final
    loop carry value and the second element represents the stacked outputs of
    the second output of ``f`` when scanned over the leading axis of the inputs.
  """
  xs_flat, xs_tree = tree_flatten(xs)

  # Every leaf of `xs` must have a leading axis; those sizes (together with
  # the optional explicit `length`) determine the iteration count.
  try:
    lengths = [x.shape[0] for x in xs_flat]
  except AttributeError as err:
    msg = "scan got value with no leading axis to scan over: {}."
    raise ValueError(
      msg.format(', '.join(str(x) for x in xs_flat
                           if not hasattr(x, 'shape')))) from err

  if length is not None:
    length = int(length)
    if not all(length == l for l in lengths):
      msg = ("scan got `length` argument of {} which disagrees with "
             "leading axis sizes {}.")
      raise ValueError(msg.format(length, [x.shape[0] for x in xs_flat]))
  else:
    unique_lengths = set(lengths)
    if len(unique_lengths) > 1:
      msg = "scan got values with different leading axis sizes: {}."
      raise ValueError(msg.format(', '.join(str(x.shape[0]) for x in xs_flat)))
    elif len(unique_lengths) == 0:
      msg = "scan got no values to scan over and `length` not provided."
      raise ValueError(msg)
    else:
      length, = unique_lengths

  if config.jax_disable_jit:
    # Eager fallback: run the Python loop directly and stack the outputs.
    carry = init
    ys = []
    maybe_reversed = reversed if reverse else lambda x: x
    for i in maybe_reversed(range(length)):
      xs_slice = [_index_array(i, core.get_aval(x), x) for x in xs_flat]
      carry, y = f(carry, tree_unflatten(xs_tree, xs_slice))
      ys.append(y)
    stack = lambda y, *ys: (y if core.get_aval(y) is core.abstract_unit
                            else jax.numpy.stack((y, *ys)))
    stacked_y = tree_multimap(stack, *maybe_reversed(ys))
    return carry, stacked_y

  x_shapes = [masking.padded_shape_as_value(x.shape[1:]) for x in xs_flat]
  x_dtypes = [x.dtype for x in xs_flat]
  x_avals = tuple(_map(ShapedArray, x_shapes, x_dtypes))

  def _create_jaxpr(init):
    # Trace `f` to a jaxpr at the carry avals plus per-iteration slice avals,
    # and check that the body returns a (carry, output) pair.
    init_flat, init_tree = tree_flatten(init)
    in_flat, in_tree = tree_flatten((init, xs))
    carry_avals = tuple(_map(_abstractify, init_flat))
    jaxpr, consts, out_tree = _initial_style_jaxpr(
        f, in_tree, carry_avals + x_avals, "scan")
    out_tree_children = out_tree.children()
    if len(out_tree_children) != 2:
      msg = "scan body output must be a pair, got {}."
      raise TypeError(msg.format(tree_unflatten(out_tree, jaxpr.out_avals)))
    carry_avals_out = jaxpr.out_avals[:out_tree_children[0].num_leaves]
    return init_flat, carry_avals, carry_avals_out, init_tree, in_flat, jaxpr, consts, out_tree, out_tree_children

  # The carry input and output avals must match exactly. However, we want to account for
  # the case when init contains weakly-typed values (e.g. Python scalars), with avals that
  # may not match the output despite being compatible by virtue of their weak type.
  # To do this, we compute the jaxpr in two passes: first with the raw inputs, and if
  # necessary, a second time with modified init values.
  init_flat, carry_avals, carry_avals_out, init_tree, *rest = _create_jaxpr(init)
  new_init_flat, changed = _promote_weak_typed_inputs(init_flat, carry_avals, carry_avals_out)
  if changed:
    new_init = tree_unflatten(init_tree, new_init_flat)
    init_flat, carry_avals, carry_avals_out, init_tree, *rest = _create_jaxpr(new_init)
  in_flat, jaxpr, consts, out_tree, out_tree_children = rest

  _check_tree_and_avals("scan carry output and input",
                        # Extract the subtree and avals for the first element of the return tuple
                        out_tree_children[0], carry_avals_out,
                        init_tree, carry_avals)

  out = scan_p.bind(*itertools.chain(consts, in_flat),
                    reverse=reverse, length=length, jaxpr=jaxpr,
                    num_consts=len(consts), num_carry=len(init_flat),
                    linear=(False,) * (len(consts) + len(in_flat)),
                    unroll=unroll)
  return tree_unflatten(out_tree, out)
def _scan_impl_unrolled(*args, reverse, length, num_consts, num_carry, linear,
                        f_impl, x_avals, y_avals):
  """Fully unrolled scan: apply `f_impl` once per iteration at trace time."""
  consts, carry, xs = split_list(args, [num_consts, num_carry])

  per_iter_ys = []
  for step in range(length):
    # Walk the data back-to-front when reverse=True.
    idx = length - step - 1 if reverse else step
    x_slices = [_index_array(idx, a, x) for a, x in zip(x_avals, xs)]
    outputs = f_impl(*consts, *carry, *x_slices)
    carry, y = split_list(outputs, [num_carry])
    per_iter_ys.append(y)

  if reverse:
    per_iter_ys = per_iter_ys[::-1]
  # Transpose per-iteration outputs into per-output iteration lists, then
  # stack each along a new leading axis.
  stacked_ys = _map(_stack, y_avals, list(zip(*per_iter_ys)))
  return (*carry, *stacked_ys)
def _scan_impl_loop(*args, reverse, length, num_consts, num_carry, linear,
                    f_impl, x_avals, y_avals):
  """Implement scan as a single while_loop carrying flat state [i, *carry, *ys]."""
  consts, init, xs = split_list(args, [num_consts, num_carry])

  def cond_fun(vals):
    # The loop counter is the first element of the flat loop state.
    i, *_ = vals
    return i < length

  def body_fun(vals):
    [i], carry, ys = split_list(vals, [1, num_carry])
    # Index the data back-to-front when reverse=True.
    i_ = length - i - 1 if reverse else i
    x = _map(partial(_dynamic_index_array, i_), x_avals, xs)
    out_flat = f_impl(*consts, *carry, *x)
    carry_out, y_updates = split_list(out_flat, [num_carry])
    # Write this iteration's outputs into the preallocated ys buffers.
    ys_out = _map(partial(_update_array, i_), y_avals, ys, y_updates)
    return [i + 1] + carry_out + ys_out

  ys_init = _map(partial(_empty_array, length), y_avals)
  if length == 0:
    # Zero-trip scan: the carry passes through and the outputs stay empty.
    return init + ys_init
  else:
    init_val = [lax._const(length, 0)] + init + ys_init
    _, *outs = while_loop(cond_fun, body_fun, init_val)
    return outs
def _scan_impl_block_unrolled(*args, reverse, length, num_consts, num_carry,
                              linear, block_length, f_impl, x_avals, y_avals):
  """Blocked scan: an outer loop of `length // block_length` steps, each of
  which runs an unrolled inner scan over `block_length` iterations."""
  consts, init, xs = split_list(args, [num_consts, num_carry])

  num_blocks, rem = divmod(length, block_length)
  assert rem == 0

  # Reshape each input from (length, ...) to (num_blocks, block_length, ...).
  to_blocks = partial(_partition_leading, num_blocks, block_length)
  xs_block = _map(to_blocks, x_avals, xs)

  block_aval = partial(_prepend_dim_to_aval, block_length)
  x_block_avals = _map(block_aval, x_avals)
  y_block_avals = _map(block_aval, y_avals)

  # The unrolled inner scan serves as the body of the outer loop.
  inner_impl = partial(
      _scan_impl_unrolled, reverse=reverse, length=block_length,
      num_consts=num_consts, num_carry=num_carry, linear=linear,
      f_impl=f_impl, x_avals=x_avals, y_avals=y_avals)

  outer_outs = _scan_impl_loop(
      *consts, *init, *xs_block, reverse=reverse, length=num_blocks,
      num_consts=num_consts, num_carry=num_carry, linear=linear,
      f_impl=inner_impl, x_avals=x_block_avals, y_avals=y_block_avals)

  carry, ys_blocks = split_list(outer_outs, [num_carry])
  # Collapse (num_blocks, block_length, ...) back to (length, ...).
  from_blocks = partial(_combine_leading, num_blocks, block_length)
  ys = _map(from_blocks, y_avals, ys_blocks)
  return (*carry, *ys)
def _scan_impl(*args, reverse, length, num_consts, num_carry, jaxpr, linear,
               unroll):
  """Dispatch scan evaluation: a plain loop when unroll == 1, otherwise a
  blocked loop over `length // unroll` blocks plus an unrolled remainder."""
  _, _, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])
  _, y_avals = split_list(jaxpr.out_avals, [num_carry])
  f_impl = core.jaxpr_as_fun(jaxpr)

  if unroll == 1:
    return _scan_impl_loop(
        *args, reverse=reverse, length=length, num_consts=num_consts,
        num_carry=num_carry, linear=linear, f_impl=f_impl, x_avals=x_avals,
        y_avals=y_avals)

  consts, init, xs = split_list(args, [num_consts, num_carry])
  num_blocks, rem = divmod(length, unroll)
  length_div = num_blocks * unroll

  if rem > 0:
    # Split off the remainder iterations; they run last, so for a reverse
    # scan they come from the front of the data.
    if reverse:
      split = partial(_split_leading_dim, rem)
      xs_rem, xs = unzip2(_map(split, x_avals, xs))
    else:
      split = partial(_split_leading_dim, length_div)
      xs, xs_rem = unzip2(_map(split, x_avals, xs))

  outs = _scan_impl_block_unrolled(
      *consts, *init, *xs, reverse=reverse, length=length_div,
      num_consts=num_consts, num_carry=num_carry, linear=linear,
      block_length=unroll, f_impl=f_impl, x_avals=x_avals, y_avals=y_avals)

  carry, ys = split_list(outs, [num_carry])

  if rem > 0:
    # Run the leftover iterations fully unrolled, continuing from the carry,
    # then stitch the two output segments back together in data order.
    outs = _scan_impl_unrolled(
        *consts, *carry, *xs_rem, reverse=reverse, length=rem,
        num_consts=num_consts, num_carry=num_carry, linear=linear,
        f_impl=f_impl, x_avals=x_avals, y_avals=y_avals)
    carry, ys_rem = split_list(outs, [num_carry])
    if reverse:
      ys = _map(_concatenate, y_avals, ys_rem, ys)
    else:
      ys = _map(_concatenate, y_avals, ys, ys_rem)

  return (*carry, *ys)
def _stack(aval, vals):
  """Stack `vals` along a new leading axis; unit avals pass through."""
  if aval is core.abstract_unit:
    return core.unit
  expanded = [lax.expand_dims(v, (0,)) for v in vals]
  return lax.concatenate(expanded, 0)
def _concatenate(aval, x1, x2):
  """Concatenate two arrays along axis 0; unit avals pass through."""
  return (core.unit if aval is core.abstract_unit
          else lax.concatenate([x1, x2], 0))
def _split_leading_dim(i, aval, x):
  """Split `x` at index `i` along axis 0, returning the two pieces."""
  if aval is core.abstract_unit:
    return (core.unit, core.unit)
  assert x.ndim >= 1
  head = lax.slice_in_dim(x, 0, i)
  tail = lax.slice_in_dim(x, i, x.shape[0])
  return (head, tail)
def _dynamic_index_array(i, aval, x):
  """Index `x` at (possibly traced) position `i`, dropping the leading axis."""
  return (core.unit if aval is core.abstract_unit
          else lax.dynamic_index_in_dim(x, i, keepdims=False))
def _index_array(i, aval, x):
  """Index `x` at static position `i`, dropping the leading axis."""
  return (core.unit if aval is core.abstract_unit
          else lax.index_in_dim(x, i, keepdims=False))
def _empty_array(sz, aval):
  """Build a zero-filled output buffer with a leading axis of size `sz`."""
  if aval is core.abstract_unit:
    return core.unit
  return lax.full((sz,) + aval.shape, 0, aval.dtype)
def _update_array(i, aval, xs, x):
  """Write slice `x` into buffer `xs` at position `i` along axis 0."""
  return (core.unit if aval is core.abstract_unit
          else lax.dynamic_update_index_in_dim(xs, x, i, 0))
def _partition_leading(sz0, sz1, aval, x):
  """Reshape a leading axis of size sz0*sz1 into two axes (sz0, sz1)."""
  if aval is core.abstract_unit:
    return core.unit
  assert x.ndim >= 1
  assert x.shape[0] == sz0 * sz1
  new_shape = (sz0, sz1, *x.shape[1:])
  return lax.reshape(x, new_shape)
def _combine_leading(sz0, sz1, aval, x):
  """Collapse the two leading axes (sz0, sz1) back into one of size sz0*sz1."""
  if aval is core.abstract_unit:
    return core.unit
  assert x.ndim >= 2
  assert x.shape[0] == sz0
  assert x.shape[1] == sz1
  return lax.collapse(x, 0, 2)
def _prepend_dim_to_aval(sz, aval):
  """Return `aval` with an extra leading axis of size `sz`."""
  if aval is core.abstract_unit:
    return aval
  if isinstance(aval, ShapedArray):
    # Stacked scan outputs are never weakly typed.
    return aval.update(shape=(sz, *aval.shape), weak_type=False)
  raise TypeError(f'Prepending dim {sz} to aval {aval}')
def _scan_abstract_eval(*args, reverse, length, num_consts, num_carry, jaxpr,
                        linear, unroll):
  """Output avals: the carry avals, then each y aval with a leading `length` axis."""
  carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])
  stacked_y_avals = [_prepend_dim_to_aval(length, a) for a in y_avals]
  return carry_avals + stacked_y_avals
def _scan_jvp(primals, tangents, reverse, length, jaxpr, num_consts, num_carry,
              linear, unroll):
  """JVP rule for scan: scan the (primal, tangent) values jointly.

  Which tangent carries are nonzero must be solved for by fixpoint iteration,
  since a zero tangent carry can become nonzero after one loop iteration.
  """
  num_xs = len(jaxpr.in_avals) - num_carry - num_consts
  num_ys = len(jaxpr.out_avals) - num_carry
  nonzeros = [type(t) is not ad_util.Zero for t in tangents]
  const_nz, init_nz, xs_nz = split_list(nonzeros, [num_consts, num_carry])

  # Fixpoint computation of which carry are not ad.zero: either
  # non-zero from init, or the carry out is non-zero. Each iteration promotes
  # at least one carry to non-zero. We need at most len(carry) iterations,
  # but we need one last iteration to prepare the jaxpr based on the final
  # carry_nz.
  carry_nz = init_nz
  for _ in range(1 + len(carry_nz)):
    nonzeros = const_nz + carry_nz + xs_nz
    jaxpr_jvp, nonzeros_out = ad.jvp_jaxpr(
        jaxpr, nonzeros, instantiate=carry_nz + [False] * num_ys)
    carry_nz_out, _ = nonzeros_out[:num_carry], nonzeros_out[num_carry:]
    if carry_nz_out == carry_nz:
      break
    else:
      carry_nz = _map(operator.or_, carry_nz, carry_nz_out)
  else:
    assert False, "Fixpoint not reached"

  tangents = [ad.instantiate_zeros(t) if nz else t
              for t, nz in zip(tangents, nonzeros)]

  consts, init, xs = split_list(primals, [num_consts, num_carry])
  all_tangents = split_list(tangents, [num_consts, num_carry])
  consts_dot, init_dot, xs_dot = _map(_prune_zeros, all_tangents)

  # Regroup the jvp jaxpr's binders as [consts, consts_dot, carry, carry_dot,
  # xs, xs_dot] in and [carry, carry_dot, ys, ys_dot] out.
  jaxpr_jvp_rearranged = ad.rearrange_binders(
      jaxpr_jvp,
      [num_consts, num_carry, num_xs], [len(consts_dot), len(init_dot), len(xs_dot)],
      [num_carry, num_ys], [len(init_dot), sum(nonzeros_out) - len(init_dot)])

  consts_linear, init_linear, xs_linear = split_list(linear, [num_consts, num_carry])
  # All tangent inputs are linear in the combined scan.
  jaxpr_jvp_linear = tuple(consts_linear + [True] * len(consts_dot)
                           + init_linear + [True] * len(init_dot)
                           + xs_linear + [True] * len(xs_dot))

  out_flat = scan_p.bind(
      *(consts + consts_dot + init + init_dot + xs + xs_dot),
      reverse=reverse, length=length, jaxpr=jaxpr_jvp_rearranged,
      num_consts=num_consts + len(consts_dot),
      num_carry=num_carry + len(init_dot),
      linear=jaxpr_jvp_linear, unroll=unroll)

  carry, carry_dot, ys, ys_dot = split_list(out_flat, [num_carry, len(init_dot), num_ys])
  primals_out = carry + ys
  tangents_out_iter = iter(carry_dot + ys_dot)
  # Reinsert symbolic zeros for outputs whose tangents are known to be zero.
  tangents_out = [next(tangents_out_iter) if nz else ad_util.Zero.from_value(p)
                  for p, nz in zip(primals_out, nonzeros_out)]
  return primals_out, tangents_out
def _prune_zeros(ts):
  """Drop symbolic-zero tangents, keeping only the nonzero ones."""
  kept = []
  for t in ts:
    if type(t) is not ad_util.Zero:
      kept.append(t)
  return kept
def _scan_partial_eval(trace, *tracers, reverse, length, num_consts, num_carry,
                       jaxpr, linear, unroll):
  """Partial-eval rule for scan.

  Splits the body into a known part (jaxpr_1, run now, which also emits
  residuals) and an unknown part (jaxpr_2, staged out, which consumes them).
  As in the JVP rule, the set of unknown carries is solved by fixpoint
  iteration. Loop-invariant ("intensive") residuals are hoisted to consts of
  jaxpr_2; per-iteration ("extensive") residuals stay as scanned inputs.
  """
  num_ys = len(jaxpr.out_avals) - num_carry

  unknowns = [t.pval[0] is not None for t in tracers]
  const_uk, init_uk, xs_uk = split_list(unknowns, [num_consts, num_carry])

  # Fixpoint computation of which carry are unknown (not a constant): either
  # unknown from init, or the carry out is unknown. Each iteration promotes
  # at least one carry to unknown. We need at most len(carry) iterations,
  # but we need one last iteration to prepare the jaxpr based on the final
  # carry_uk.
  carry_uk = init_uk
  for _ in range(1 + len(carry_uk)):
    unknowns = const_uk + carry_uk + xs_uk
    jaxpr_1, jaxpr_2, out_uk = pe.partial_eval_jaxpr(
        jaxpr, unknowns, instantiate=carry_uk + [False] * num_ys)
    carry_uk_out = out_uk[:num_carry]
    if carry_uk_out == carry_uk:
      break
    else:
      carry_uk = _map(operator.or_, carry_uk, carry_uk_out)
  else:
    assert False, "Fixpoint not reached"
  num_res = len(jaxpr_1.out_avals) - len(jaxpr_2.out_avals)

  # The residuals are treated as extensive outputs of jaxpr_1 (and extensive
  # inputs to jaxpr_2), but residuals that are loop-invariant can be hoisted.
  # TODO(mattjj): hoist other loop-invariant values here too (instantiate=False)
  invariant_pvals = [pe.PartialVal.known(core.unit if uk else t.pval[1])
                     for uk, t in zip(unknowns[:num_consts], tracers[:num_consts])]
  other_pvals = [pe.PartialVal.unknown(a) for a in jaxpr_1.in_avals[num_consts:]]
  in_pvals_1 = invariant_pvals + other_pvals
  jaxpr_1_opt, out_pvals_1, consts_1 = pe.trace_to_jaxpr(
      lu.wrap_init(core.jaxpr_as_fun(jaxpr_1)), in_pvals_1,
      instantiate=[True] * (num_carry + num_ys) + [False] * num_res)
  jaxpr_1_opt = pe.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr_1_opt), ())
  num_consts_1 = num_consts + len(consts_1)
  # any now-known residuals are intensive, so we want to revise jaxpr_2 to take
  # those inputs as constants rather than as extensive inputs
  _, _, res_pvals = split_list(out_pvals_1, [num_carry, num_ys])
  intensive_residuals = [const for pv, const in res_pvals if pv is None]
  move = [False] * len(jaxpr_1.in_avals) + [pv is None for pv, _ in res_pvals]
  jaxpr_2_opt = pe.move_binders_to_front(jaxpr_2, move)
  num_consts_2 = num_consts + len(intensive_residuals)

  # As another optimization, for any extensive inputs that are just forwarded to
  # extensive outputs, to avoid a copy (looping over dynamic-update-slice) we'd
  # rather just forward the input tracer. That means pruning some extensive
  # outputs from the jaxpr here, and updating out_flat below.
  extensive_invars = jaxpr_1_opt.jaxpr.invars[num_consts_1 + num_carry:]
  extensive_outvars = jaxpr_1_opt.jaxpr.outvars[num_carry:]
  extensive_avals = [core.unmapped_aval(length, 0, core.raise_to_shaped(v.aval))
                     for v in extensive_outvars]
  fwd_extensive = [num_consts + num_carry + extensive_invars.index(v)
                   if v in extensive_invars else None for v in extensive_outvars]
  jaxpr_1_opt.jaxpr.outvars = (
      jaxpr_1_opt.jaxpr.outvars[:num_carry] +
      [v for i, v in zip(fwd_extensive, extensive_outvars) if i is None])

  in_consts = (list(consts_1) + [core.unit] * num_consts +
               [core.unit if uk else t.pval[1]
                for uk, t in zip(unknowns[num_consts:], tracers[num_consts:])])
  linear_1 = ([False] * len(consts_1) + [True] * num_consts +
              [lin or uk for uk, lin
               in zip(unknowns[num_consts:], linear[num_consts:])])
  out_flat = scan_p.bind(
      *in_consts, reverse=reverse, length=length, jaxpr=jaxpr_1_opt,
      num_consts=num_consts_1, num_carry=num_carry, linear=tuple(linear_1),
      unroll=unroll)

  # Propagate the forwarded extensive outputs using fwd_extensive. Any
  # numpy.ndarray inputs should be converted to JAX DeviceArrays.
  out_carry, out_extensive = split_list(out_flat, [num_carry])
  out_extensive_iter = iter(out_extensive)
  out_extensive = [next(out_extensive_iter) if i is None
                   else _maybe_device_put(tracers[i].pval[1]) if tracers[i].is_known()
                   else tracers[i] for i in fwd_extensive]
  assert all(a.strip_named_shape() == core.raise_to_shaped(
      core.get_aval(out)).strip_named_shape()
             for a, out in zip(extensive_avals, out_extensive))
  out_flat = out_carry + out_extensive

  out_carry, ys, res_and_units = split_list(out_flat, [num_carry, num_ys])
  extensive_residuals = [r for r, (pv, _) in zip(res_and_units, res_pvals) if pv is not None]

  new_tracers = [trace.instantiate_const(t) if uk else trace.new_instantiated_literal(core.unit)
                 for uk, t in zip(unknowns, tracers)]
  carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])
  ys_avals = _map(partial(_prepend_dim_to_aval, length), y_avals)
  out_avals = carry_avals + ys_avals
  out_pvs = [aval if uk else None for aval, uk in zip(out_avals, out_uk)]

  out_consts = out_carry + ys
  int_res_tracers = _map(trace.new_instantiated_const, intensive_residuals)
  ext_res_tracers = _map(trace.new_instantiated_const, extensive_residuals)
  out_tracers = [pe.JaxprTracer(trace, pe.PartialVal((pv, const)), None)
                 for pv, const in zip(out_pvs, out_consts)]
  linear_2 = ([False] * len(int_res_tracers) +
              [lin or not uk for uk, lin in zip(unknowns, linear)] +
              [False] * len(ext_res_tracers))
  eqn = pe.new_eqn_recipe(int_res_tracers + new_tracers + ext_res_tracers,
                          out_tracers, scan_p,
                          dict(reverse=reverse, length=length, jaxpr=jaxpr_2_opt,
                               num_consts=num_consts_2,
                               num_carry=num_carry, linear=tuple(linear_2),
                               unroll=unroll),
                          source_info_util.current())
  for t in out_tracers: t.recipe = eqn
  return out_tracers
def _maybe_device_put(x):
if isinstance(x, np.ndarray):
return lax._device_put_raw(x)
else:
return x
def _scan_transpose(cts, *args, reverse, length, num_consts, num_carry, jaxpr,
                    linear, unroll):
  """Transpose rule for scan: run the transposed body as a scan in the
  opposite direction, carrying the const cotangents along with the carry."""
  # we've only implemented transposing scans with specific lin/nonlin patterns
  consts_lin, init_lin, xs_lin = split_list(linear, [num_consts, num_carry])
  # ires/eres: the nonlinear ("residual") consts and xs, passed through as-is.
  num_ires = len(consts_lin) - sum(consts_lin)
  num_eres = len(xs_lin) - sum(xs_lin)
  if consts_lin != [False] * num_ires + [True] * (len(consts_lin) - num_ires):
    raise NotImplementedError
  if xs_lin != [True] * (len(xs_lin) - num_eres) + [False] * num_eres:
    raise NotImplementedError
  if not all(init_lin):
    pass  # TODO(mattjj): error check https://github.com/google/jax/issues/1963

  consts, _, xs = split_list(args, [num_consts, num_carry])
  ires, _ = split_list(consts, [num_ires])
  _, eres = split_list(xs, [sum(xs_lin)])
  assert not any(ad.is_undefined_primal(r) for r in ires)
  assert not any(ad.is_undefined_primal(r) for r in eres)

  carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])
  ys_avals = _map(partial(_prepend_dim_to_aval, length), y_avals)
  ct_carry, ct_ys = split_list(cts, [num_carry])
  ct_carry = _map(ad.instantiate_zeros_aval, carry_avals, ct_carry)
  ct_ys = _map(ad.instantiate_zeros_aval, ys_avals, ct_ys)
  # Const cotangents start at zero and are accumulated across iterations.
  ct_consts = _map(ad_util.zeros_like_aval, jaxpr.in_avals[num_ires:num_consts])

  #       jaxpr :: [ires, T d] -> [T c] -> [T a, eres] -> ([T c], [T b])
  # jaxpr_trans :: [ires] -> [CT d, CT c] -> [CT b, eres] -> ([CT d, CT c], [CT a])
  jaxpr_trans = _transpose_scan_jaxpr(
      num_ires, num_consts - num_ires, num_eres, jaxpr)
  linear_trans = ([False] * num_ires +
                  [True] * (len(ct_consts) + len(ct_carry) + len(ct_ys)) +
                  [False] * num_eres)

  outs = scan_p.bind(
      *(ires + ct_consts + ct_carry + ct_ys + eres), reverse=not reverse,
      length=length, jaxpr=jaxpr_trans, num_consts=num_ires,
      num_carry=num_consts-num_ires+num_carry, linear=tuple(linear_trans),
      unroll=unroll)
  ct_consts, ct_init, ct_xs = split_list(outs, [num_consts - num_ires, num_carry])
  # Residual positions get no cotangent (None).
  return [None] * num_ires + ct_consts + ct_init + ct_xs + [None] * num_eres
# transpose_scan_jaxpr :: ([res1, c, a, res2] -> b)
#                         -> ([res1, CT c, CT b, res2] -> [CT c, CT a])
def _transpose_scan_jaxpr(num_res1, num_c, num_res2, jaxpr):
  """Build the transposed body jaxpr used by _scan_transpose.

  The transposed body consumes the residuals plus the carry and output
  cotangents, and produces carry and input cotangents via a backward pass
  over `jaxpr`.
  """
  num_a = len(jaxpr.in_avals) - num_res1 - num_c - num_res2
  res1_avals, c_avals, a_avals, res2_avals = split_list(
      jaxpr.in_avals, [num_res1, num_c, num_a])
  num_b = len(jaxpr.out_avals)
  b_avals = list(jaxpr.out_avals)

  @lu.wrap_init
  def transposed(*res1_cbar_bbar_res2):
    res1, c_bar, b_bar, res2 = split_list(
        res1_cbar_bbar_res2, [num_res1, num_c, num_b])
    # The carry and per-iteration inputs are the unknowns being transposed.
    primals = (res1 + [ad.UndefinedPrimal(aval) for aval in c_avals] +
               [ad.UndefinedPrimal(aval) for aval in a_avals] + res2)
    cbar_abar = ad.backward_pass(jaxpr.jaxpr, jaxpr.consts, primals, b_bar)
    _, new_c_bar, a_bar, _ = split_list(cbar_abar, [num_res1, num_c, num_a])
    a_bar = _map(ad.instantiate_zeros_aval, a_avals, a_bar)
    # Accumulate the incoming carry cotangent with this iteration's.
    c_bar = _map(ad.instantiate_zeros_aval, c_avals,
                 _map(ad.add_tangents, c_bar, new_c_bar))
    return c_bar + a_bar
  return _make_closed_jaxpr(transposed, res1_avals + c_avals + b_avals + res2_avals)
def _make_closed_jaxpr(traceable: lu.WrappedFun, in_avals: Sequence[core.AbstractValue]):
  """Trace `traceable` at the given input avals and close over its constants."""
  open_jaxpr, _, constants = pe.trace_to_jaxpr_dynamic(traceable, in_avals)
  return core.ClosedJaxpr(open_jaxpr, constants)
def _scan_batching_rule(args, dims, axis_name, main_type, reverse, length, jaxpr, num_consts,
                        num_carry, linear, unroll):
  """Batching (vmap) rule for scan.

  Batch dimensions are moved to axis 0 for consts and carries and to axis 1
  for the scanned inputs/outputs (axis 0 remains the scan axis). Which
  carries end up batched is solved by fixpoint iteration, as in the JVP rule.
  """
  num_ys = len(jaxpr.out_avals) - num_carry
  # All batched args must agree on a single batch size.
  size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}
  orig_batched = [d is not batching.not_mapped for d in dims]
  const_batched, init_batched, xs_batched = split_list(orig_batched, [num_consts, num_carry])

  # Fixpoint computation of which carry are batched: either
  # batched from init, or the carry out is batched. Each iteration promotes
  # at least one carry to batched. We need at most len(carry) iterations,
  # but we need one last iteration to prepare the jaxpr based on the final
  # carry_batched.
  carry_batched = init_batched
  for _ in range(1 + len(carry_batched)):
    batched = const_batched + carry_batched + xs_batched
    jaxpr_batched, batched_out = batching.batch_jaxpr(
        jaxpr, size, batched,
        instantiate=carry_batched + [False] * num_ys,
        axis_name=axis_name,
        main_type=main_type)
    carry_batched_out, ys_batched = batched_out[:num_carry], batched_out[num_carry:]
    if carry_batched_out == carry_batched:
      break
    else:
      carry_batched = _map(operator.or_, carry_batched, carry_batched_out)
  else:
    assert False, "Fixpoint not reached"

  consts, init, xs = split_list(args, [num_consts, num_carry])
  consts_bdims, init_bdims, xs_bdims = split_list(dims, [num_consts, num_carry])
  # Normalize batch axes: consts/carries batched at axis 0, xs at axis 1;
  # unbatched carries that become batched are broadcast to the batch size.
  new_consts = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0
                else x for x, d in zip(consts, consts_bdims)]
  new_init = [batching.broadcast(x, size, 0) if now_batched and not was_batched
              else batching.moveaxis(x, d, 0) if now_batched else x
              for x, d, was_batched, now_batched in
              zip(init, init_bdims, init_batched, carry_batched)]
  new_xs = [batching.moveaxis(x, d, 1) if d is not batching.not_mapped and d != 1
            else x for x, d in zip(xs, xs_bdims)]
  new_args = new_consts + new_init + new_xs

  outs = scan_p.bind(
      *new_args, reverse=reverse, length=length, jaxpr=jaxpr_batched,
      num_consts=num_consts, num_carry=num_carry, linear=linear, unroll=unroll)
  carry_bdims = [0 if b else batching.not_mapped for b in carry_batched]
  ys_bdims = [1 if b else batching.not_mapped for b in ys_batched]
  return outs, carry_bdims + ys_bdims
def _scan_masking_rule(padded_vals, logical_shapes, reverse, length,
                       jaxpr, num_consts, num_carry, linear, unroll):
  """Masking rule for scan: scan over the padded length, with the body made
  a no-op once the dynamic (logical) length has been reached."""
  dynamic_length, = masking.shape_as_value((length,))
  masked_jaxpr = _masked_scan_jaxpr(jaxpr, num_consts, num_carry)
  consts, init, xs = split_list(padded_vals, [num_consts, num_carry])
  max_length, = {x.shape[0] for x in xs}
  const_linear, init_linear, xs_linear = split_list(linear, [num_consts, num_carry])
  # The masked jaxpr takes dynamic_length as an extra leading const and an
  # iteration counter (starting at 0) as an extra leading carry.
  out_vals = scan_p.bind(
      *itertools.chain([dynamic_length] + consts, [0], init, xs),
      reverse=reverse, length=max_length, jaxpr=masked_jaxpr,
      num_consts=1 + num_consts, num_carry=1 + num_carry,
      linear=tuple([False] + const_linear + [False] + init_linear + xs_linear),
      unroll=unroll)
  # Drop the counter carry before returning to the caller.
  return out_vals[1:]
def _masked_scan_jaxpr(jaxpr, num_consts, num_carry):
  """Wrap `jaxpr` so iterations past `dynamic_length` keep the carry fixed."""
  fun = core.jaxpr_as_fun(jaxpr)

  @lu.wrap_init
  def masked(*args):
    [dynamic_length], consts, [i], carry, xs = split_list(
        args, [1, num_consts, 1, num_carry])
    out = fun(*(consts + carry + xs))
    new_carry, ys = split_list(out, [num_carry])
    # Past the logical end, select the old carry so extra iterations are no-ops.
    new_carry = [lax.select(i < dynamic_length, new_c, c)
                 for new_c, c in zip(new_carry, carry)]
    return [i + 1] + new_carry + ys

  # Scalar integer aval for both the dynamic_length const and the counter.
  aval = ShapedArray((), dtypes.int_)
  const_avals, carry_avals, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])
  return _make_closed_jaxpr(masked, [aval] + const_avals + [aval] + carry_avals + x_avals)
def _scan_typecheck(bind_time, *avals, reverse, length, num_consts, num_carry,
                    jaxpr, linear, unroll):
  """Typecheck scan_p's params and operand avals.

  `bind_time` is True when called from scan_bind (where `length` may still be
  a masking.Poly); the core typecheck rule partial-applies it to False.
  """
  tc = partial(_typecheck_param, 'scan')
  tc(reverse, 'reverse', 'bool', type(reverse) is bool)
  tc(num_consts, 'num_consts', 'non-negative int',
     type(num_consts) is int and num_consts >= 0)
  tc(num_carry, 'num_carry', 'non-negative int',
     type(num_carry) is int and num_carry >= 0)
  tc(jaxpr, 'jaxpr', 'ClosedJaxpr', type(jaxpr) is core.ClosedJaxpr)
  tc(linear, 'linear', 'tuple of bool',
     type(linear) is tuple and all(type(x) is bool for x in linear))
  tc(unroll, 'unroll', 'positive int', type(unroll) is int and unroll > 0)

  length_types = (int, masking.Poly) if bind_time else (int,)
  tc(length, 'length', 'non-negative int',
     type(length) in length_types and length >= 0)

  core.typecheck_assert(
      len(linear) == len(avals),
      f'scan param linear has length {len(linear)} for {len(avals)} operands')

  const_avals, init_avals, x_avals = split_list(avals, [num_consts, num_carry])
  const_avals_jaxpr, init_avals_jaxpr, x_avals_jaxpr = split_list(
      jaxpr.in_avals, [num_consts, num_carry])
  carry_avals_jaxpr, _ = split_list(jaxpr.out_avals, [num_carry])
  # The operand xs carry the extra `length` axis that the body never sees.
  x_avals_mapped = _map(partial(core.mapped_aval, length, 0), x_avals)

  core.typecheck_assert(
      all(_map(core.typematch, init_avals_jaxpr, carry_avals_jaxpr)),
      f'scan input carry input and output types mismatch: '
      f'\n{_avals_short(init_avals_jaxpr)}\nvs\n{_avals_short(carry_avals_jaxpr)}')
  core.typecheck_assert(
      all(_map(core.typecompat, const_avals_jaxpr, const_avals)),
      f'scan jaxpr takes input const types\n{_avals_short(const_avals_jaxpr)},\n'
      f'called with consts of type\n{_avals_short(const_avals)}')
  core.typecheck_assert(
      all(_map(core.typecompat, init_avals_jaxpr, init_avals)),
      f'scan jaxpr takes input carry types\n{_avals_short(init_avals_jaxpr)},\n'
      f'called with initial carry of type\n{_avals_short(init_avals)}')
  core.typecheck_assert(
      all(_map(core.typecompat, x_avals_jaxpr, x_avals_mapped)),
      f'scan jaxpr takes input sequence types\n{_avals_short(x_avals_jaxpr)},\n'
      f'called with sequence of type\n{_avals_short(x_avals)}')
def scan_bind(*args, **params):
  """Bind scan_p, running full typechecking first when checks are enabled."""
  if config.jax_enable_checks:
    avals = [core.get_aval(arg) for arg in args]
    _scan_typecheck(True, *avals, **params)
    core.check_jaxpr(params['jaxpr'].jaxpr)
  return core.Primitive.bind(scan_p, *args, **params)
# The scan primitive, with scan_bind installed as a custom bind so that every
# bind is typechecked when jax_enable_checks is on, plus all transformation
# rules (JVP/transpose, partial eval, lowering, batching, masking, typecheck).
scan_p = core.Primitive("scan")
scan_p.multiple_results = True
scan_p.def_custom_bind(scan_bind)
scan_p.def_impl(partial(xla.apply_primitive, scan_p))
scan_p.def_abstract_eval(_scan_abstract_eval)
ad.primitive_jvps[scan_p] = _scan_jvp
ad.primitive_transposes[scan_p] = _scan_transpose
pe.custom_partial_eval_rules[scan_p] = _scan_partial_eval
xla.initial_style_translations[scan_p] = xla.lower_fun_initial_style(_scan_impl)
batching.initial_style_batchers[scan_p] = _scan_batching_rule
masking.masking_rules[scan_p] = _scan_masking_rule
core.custom_typechecks[scan_p] = partial(_scan_typecheck, False)
@api_boundary
def map(f, xs):
  """Map a function over leading array axes.

  Like Python's builtin map, except inputs and outputs are in the form of
  stacked arrays. Consider using the ``jax.vmap`` transform instead, unless you
  need to apply a function element by element for reduced memory usage or
  heterogeneous computation with other control flow primitives.

  When ``xs`` is an array type, the semantics of ``map`` are given by this
  Python implementation::

    def map(f, xs):
      return np.stack([f(x) for x in xs])

  Like ``scan``, ``map`` is implemented in terms of JAX primitives so many of
  the same advantages over a Python loop apply: ``xs`` may be an arbitrary
  nested pytree type, and the mapped computation is compiled only once.

  Args:
    f: a Python function to apply element-wise over the first axis or axes of
      ``xs``.
    xs: values over which to map along the leading axis.

  Returns:
    Mapped values.
  """
  def scanned_fun(carry, x):
    # `map` is a scan with an empty carry; only the stacked outputs matter.
    return carry, f(x)
  _, ys = scan(scanned_fun, (), xs)
  return ys
def _concat_masking_rule(padded_vals, logical_shapes, dimension):
  """Masking rule for concatenate: copy each operand's logical prefix into
  its proper position of the (fragmented) padded concatenation."""
  result = lax.concatenate(padded_vals, dimension)  # fragmented
  written = 0
  for padded_val, logical_shape in zip(padded_vals, logical_shapes):
    logical_size = logical_shape[dimension]
    result = _memcpy(dimension, logical_size, padded_val, result, written)
    written += logical_size
  return result
def _memcpy(axis, num, src, dst, offset):
  """Copy `num` slices of `src` along `axis` into `dst` starting at `offset`."""
  def copy_slice(i, acc):
    piece = lax.dynamic_index_in_dim(src, i, axis)
    return lax.dynamic_update_index_in_dim(acc, piece, i + offset, axis)
  return fori_loop(0, num, copy_slice, dst)
masking.masking_rules[lax.concatenate_p] = _concat_masking_rule # type: ignore
def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):
"""Raises TypeError if (tree1, avals1) does not match (tree2, avals2).
Corresponding `tree` and `avals` must match in the sense that the number of
leaves in `tree` must be equal to the length of `avals`. `what` will be
prepended to details of the mismatch in TypeError.
"""
if tree1 != tree2:
raise TypeError(
f"{what} must have same type structure, got {tree1} and {tree2}.")
if not all(_map(core.typematch, avals1, avals2)):
raise TypeError(
f"{what} must have identical types, got\n"
f"{tree_unflatten(tree1, avals1)}\nand\n"
f"{tree_unflatten(tree2, avals2)}.")
def _check_tree(func_name, expected_name, actual_tree, expected_tree, has_aux=False):
if has_aux:
actual_tree_children = actual_tree.children()
if len(actual_tree_children) == 2:
# select first child as result tree
actual_tree = tree_structure(actual_tree_children[0])
else:
raise ValueError(
f"{func_name}() produced a pytree with structure "
f"{actual_tree}, but a pytree tuple with auxiliary "
f"output was expected because has_aux was set to True.")
if actual_tree != expected_tree:
raise TypeError(
f"{func_name}() output pytree structure must match {expected_name}, "
f"got {actual_tree} and {expected_tree}.")
def _promote_weak_typed_inputs(in_vals, in_avals, out_avals):
"""Promote weakly-typed in_vals to be compatible with out_avals.
Args:
in_vals : flattened list of input values.
in_avals : corresponding list of avals.
out_avals : list of target output avals.
Returns:
in_vals_new : flattened list of modified in_vals with no weak types.
changed : bool; true if in_vals required modification.
"""
if len(in_vals) != len(in_avals) or len(in_avals) != len(out_avals):
# Calling function is responsible for catching this.
return in_vals, False
weak_mismatches = [i for i, (a1, a2) in enumerate(zip(in_avals, out_avals))
if getattr(a1, 'weak_type', False) and not core.typematch(a1, a2)]
if not weak_mismatches:
return in_vals, False
for i in weak_mismatches:
new_dtype = dtypes.result_type(in_vals[i], out_avals[i])
in_vals[i] = lax.convert_element_type(in_vals[i], new_dtype)
return in_vals, True
def _stop_gradient_fun(f):
  """Create a version of f() that stops all gradients."""
  def wrapper(*args, **kwargs):
    args_flat, in_args_tree = tree_flatten((args, kwargs))
    args_avals = tuple(_map(_abstractify, args_flat))
    g = lambda a, b: f(*a, **b)
    # Trace f once so stop_gradient can be applied to its closed-over
    # constants as well as to the explicit arguments.
    jaxpr, consts, out_tree = _initial_style_jaxpr(g, in_args_tree, args_avals)
    all_args = _map(lax.stop_gradient, (*consts, *args_flat))
    out = core.jaxpr_as_fun(jaxpr)(*all_args)
    return tree_unflatten(out_tree, out)
  return wrapper
# Bundles the per-function closed-over constants (or jaxprs) of the three
# callables that define a custom_root problem: f, solve, and the
# linearize-and-solve function ("l_and_s").
_RootTuple = collections.namedtuple('_RootTuple', 'f, solve, l_and_s')


def _split_root_args(args, const_lengths):
  """Split a flat arg list into (per-function constants, initial guess)."""
  params_list = split_list(args, list(const_lengths))
  return _RootTuple(*params_list[:-1]), params_list[-1]
@api_boundary
def custom_root(f, initial_guess, solve, tangent_solve):
  """Differentiably solve for the roots of a function.

  This is a low-level routine, mostly intended for internal use in JAX.
  Gradients of custom_root() are defined with respect to closed-over variables
  from the provided function ``f`` via the implicit function theorem:
  https://en.wikipedia.org/wiki/Implicit_function_theorem

  Args:
    f: function for which to find a root. Should accept a single argument,
      return a tree of arrays with the same structure as its input.
    initial_guess: initial guess for a zero of f.
    solve: function to solve for the roots of f. Should take two positional
      arguments, f and initial_guess, and return a solution with the same
      structure as initial_guess such that func(solution) = 0. In other words,
      the following is assumed to be true (but not checked)::

        solution = solve(f, initial_guess)
        error = f(solution)
        assert all(error == 0)

    tangent_solve: function to solve the tangent system. Should take two
      positional arguments, a linear function ``g`` (the function ``f``
      linearized at its root) and a tree of array(s) ``y`` with the same
      structure as initial_guess, and return a solution ``x`` such that
      ``g(x)=y``:

      - For scalar ``y``, use ``lambda g, y: y / g(1.0)``.
      - For vector ``y``, you could use a linear solve with the Jacobian, if
        dimensionality of ``y`` is not too large:
        ``lambda g, y: np.linalg.solve(jacobian(g)(y), y)``.

  Returns:
    The result of calling solve(f, initial_guess) with gradients defined via
    implicit differentiation assuming ``f(solve(f, initial_guess)) == 0``.
  """
  guess_flat, in_args_tree = tree_flatten((initial_guess,))
  guess_avals = tuple(_map(_abstractify, guess_flat))
  f_jaxpr, f_consts, out_tree = _initial_style_jaxpr(
      f, in_args_tree, guess_avals)
  in_tree, = treedef_children(in_args_tree)
  _check_tree("f", "initial_guess", out_tree, in_tree)
  # Trace solve with gradients of f stopped: gradients must flow only through
  # the implicit differentiation defined in _root_jvp, never through solve.
  solve_jaxpr, solve_consts, solution_tree = _initial_style_jaxpr(
      partial(solve, _stop_gradient_fun(f)), in_args_tree, guess_avals)
  _check_tree("solve", "initial_guess", solution_tree, in_tree)
  def linearize_and_solve(x, b):
    unchecked_zeros, f_jvp = jax.linearize(f, x)
    return tangent_solve(f_jvp, b)
  l_and_s_jaxpr, l_and_s_consts, out_tree = _initial_style_jaxpr(
      linearize_and_solve, treedef_tuple((in_tree,) * 2), guess_avals * 2)
  _check_tree("tangent_solve", "x", out_tree, in_tree)
  all_consts = [f_consts, solve_consts, l_and_s_consts]
  const_lengths = _RootTuple(*_map(len, all_consts))
  jaxprs = _RootTuple(f_jaxpr, solve_jaxpr, l_and_s_jaxpr)
  out_flat = _custom_root(
      const_lengths, jaxprs, *(_flatten(all_consts) + guess_flat))
  return tree_unflatten(out_tree, out_flat)
@partial(jax.custom_jvp, nondiff_argnums=(0, 1))
def _custom_root(const_lengths, jaxprs, *args):
  """Primal computation for custom_root: run the solve jaxpr on the guess."""
  params, initial_guess = _split_root_args(args, const_lengths)
  solution = core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + initial_guess))
  return solution
@_custom_root.defjvp
def _root_jvp(const_lengths, jaxprs, primals, tangents):
  """JVP of custom_root, via the implicit function theorem."""
  params, _ = _split_root_args(primals, const_lengths)
  solution = _custom_root(const_lengths, jaxprs, *primals)

  params_dot, _ = _split_root_args(tangents, const_lengths)

  # F(m, u) = 0      # system of equations in u, parameterized by m
  #                  # solution is u*(m) defined in a neighborhood
  # F(m, u*(m)) = 0  # satisfied in a neighborhood
  #
  # ∂_0 F(m, u*(m)) + ∂_1 F(m, u*(m)) ∂ u*(m) = 0       # implied by line above
  # ∂ u*(m) = - (∂_1 F(m, u*(m)))^{-1} ∂_0 F(m, u*(m))  # rearrange
  #
  # ∂ u*(m)[v] = - (∂_1 F(m, u*(m)))^{-1} [∂_0 F(m, u*(m))[v]]  # jvp

  f = core.jaxpr_as_fun(jaxprs.f)
  linearize_and_solve = partial(
      core.jaxpr_as_fun(jaxprs.l_and_s), *params.l_and_s)
  # rhs = ∂_0 F(m, u*(m))[v]: tangent of f w.r.t. its parameters only, with
  # the solution held fixed.
  f_at_solution = lambda *params: f(*itertools.chain(params, solution))
  _, rhs = ad.jvp(lu.wrap_init(f_at_solution)).call_wrapped(
      params.f, params_dot.f)
  solution_dot = _map(
      operator.neg, linearize_and_solve(*itertools.chain(solution, rhs)))

  return solution, solution_dot
class _LinearSolveTuple(collections.namedtuple(
'_LinearSolveTuple', 'matvec, vecmat, solve, transpose_solve')):
def transpose(self):
return type(self)(self.vecmat, self.matvec, self.transpose_solve, self.solve)
def _split_linear_solve_args(args, const_lengths):
  """Split a flat arg list into (per-function constants, rhs vector b)."""
  params_list = split_list(args, list(const_lengths))
  return _LinearSolveTuple(*params_list[:-1]), params_list[-1]
def _transpose_one_output(linear_fun, primals):
transpose_fun = jax.linear_transpose(linear_fun, primals)
def transposed_fun(x):
(y,) = transpose_fun(x)
return y
return transposed_fun
def _flatten(args):
return [x for arg in args for x in arg]
def _check_shapes(func_name, expected_name, actual, expected):
actual_shapes = _map(np.shape, tree_leaves(actual))
expected_shapes = _map(np.shape, tree_leaves(expected))
if actual_shapes != expected_shapes:
raise ValueError(
f"{func_name}() output shapes must match {expected_name}, "
f"got {actual_shapes} and {expected_shapes}")
@api_boundary
def custom_linear_solve(
    matvec, b, solve, transpose_solve=None, symmetric=False, has_aux=False):
  """Perform a matrix-free linear solve with implicitly defined gradients.

  This function allows for overriding or defining gradients for a linear
  solve directly via implicit differentiation at the solution, rather than by
  differentiating *through* the solve operation. This can sometimes be much faster
  or more numerically stable, or differentiating through the solve operation
  may not even be implemented (e.g., if ``solve`` uses ``lax.while_loop``).

  Required invariant::

      x = solve(matvec, b)  # solve the linear equation
      assert matvec(x) == b  # not checked

  Args:
    matvec: linear function to invert. Must be differentiable.
    b: constant right-hand side of the equation. May be any nested structure
      of arrays.
    solve: higher level function that solves for solution to the linear
      equation, i.e., ``solve(matvec, x) == x`` for all ``x`` of the same form
      as ``b``. This function need not be differentiable.
    transpose_solve: higher level function for solving the transpose linear
      equation, i.e., ``transpose_solve(vecmat, x) == x``, where ``vecmat`` is
      the transpose of the linear map ``matvec`` (computed automatically with
      autodiff). Required for backwards mode automatic differentiation, unless
      ``symmetric=True``, in which case ``solve`` provides the default value.
    symmetric: bool indicating if it is safe to assume the linear map
      corresponds to a symmetric matrix, i.e., ``matvec == vecmat``.
    has_aux: bool indicating whether the ``solve`` and ``transpose_solve`` functions
      return auxiliary data like solver diagnostics as a second argument.

  Returns:
    Result of ``solve(matvec, b)``, with gradients defined assuming that the
    solution ``x`` satisfies the linear equation ``matvec(x) == b``.
  """
  if transpose_solve is None and symmetric:
    transpose_solve = solve

  b_flat, in_args_tree = tree_flatten((b,))
  b_avals = tuple(_map(_abstractify, b_flat))

  tree, = treedef_children(in_args_tree)

  def _shape_checked(fun, name, has_aux):
    # Wrap a user function so its (primary) output shapes are validated
    # against b's shapes at trace time.
    def f(x):
      y = fun(x)
      _check_shapes(name, "b", y, b_flat)
      return y

    def f_aux(x):
      y, aux = fun(x)
      _check_shapes(name, "b", y, b_flat)
      return y, aux

    return f_aux if has_aux else f

  # no auxiliary data assumed for matvec
  matvec_jaxpr, matvec_consts, out_tree = _initial_style_jaxpr(
      _shape_checked(matvec, "matvec", False), in_args_tree, b_avals,
      'custom_linear_solve')
  _check_tree("matvec", "b", out_tree, tree, False)

  solve_jaxpr, solve_consts, out_tree = _initial_style_jaxpr(
      _shape_checked(partial(solve, matvec), "solve", has_aux), in_args_tree, b_avals,
      'custom_linear_solve')
  _check_tree("solve", "b", out_tree, tree, has_aux)

  if transpose_solve is None:
    # Forward-mode only: the transpose pieces stay empty.
    vecmat_jaxpr = tr_solve_jaxpr = None
    vecmat_consts = tr_solve_consts = []
  else:
    if symmetric:
      # matvec doubles as its own transpose.
      vecmat = matvec
      vecmat_jaxpr = matvec_jaxpr
      vecmat_consts = matvec_consts
    else:
      vecmat = _transpose_one_output(matvec, b)
      vecmat_jaxpr, vecmat_consts, out_tree = _initial_style_jaxpr(
          vecmat, in_args_tree, b_avals, 'custom_linear_solve')
      assert out_tree == tree

    tr_solve_jaxpr, tr_solve_consts, out_tree = _initial_style_jaxpr(
        _shape_checked(partial(transpose_solve, vecmat), "transpose_solve", has_aux),
        in_args_tree, b_avals, 'custom_linear_solve')
    _check_tree("transpose_solve", "b", out_tree, tree, has_aux)

  all_consts = [matvec_consts, vecmat_consts, solve_consts, tr_solve_consts]
  const_lengths = _LinearSolveTuple(*_map(len, all_consts))
  jaxprs = _LinearSolveTuple(
      matvec_jaxpr, vecmat_jaxpr, solve_jaxpr, tr_solve_jaxpr)

  out_flat = linear_solve_p.bind(
      *(_flatten(all_consts) + b_flat),
      const_lengths=const_lengths, jaxprs=jaxprs)

  return tree_unflatten(out_tree, out_flat)
def _linear_solve_abstract_eval(*args, const_lengths, jaxprs):
  """Abstract eval: outputs mirror the b avals, plus any aux outputs of solve."""
  args_to_raise = args[sum(const_lengths):]

  # raise aux_args to shaped arrays as well if present
  # number of aux args is the difference in out_avals
  # of solve and matvec (since they map to the same vector space)
  num_aux = len(jaxprs.solve.out_avals) - len(jaxprs.matvec.out_avals)
  if num_aux > 0:
    args_to_raise += tuple(jaxprs.solve.out_avals[-num_aux:])
  return _map(raise_to_shaped, args_to_raise)
def _custom_linear_solve_impl(*args, const_lengths, jaxprs):
  """Evaluation rule: run the user-provided solve jaxpr on (consts, b)."""
  params, b = _split_linear_solve_args(args, const_lengths)
  x = core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + b))
  return x
def _tangent_linear_map(func, params, params_dot, *x):
  """Compute the tangent of a linear map.

  Assuming ``func(*params, *x)`` is linear in ``x`` and computes ``A @ x``,
  this function computes ``∂A @ x``.
  """
  assert any(type(p) is not ad_util.Zero for p in params_dot)
  # Give x symbolic-zero tangents so the JVP differentiates only w.r.t.
  # params; for a map linear in x, the output tangent is then (∂A) @ x.
  zeros = _map(ad_util.Zero.from_value, x)
  _, out_tangent = ad.jvp(lu.wrap_init(func)).call_wrapped(
      params + list(x), params_dot + zeros)
  return out_tangent
def _custom_linear_solve_jvp(primals, tangents, const_lengths, jaxprs):
  """JVP rule: differentiate implicitly by solving A ∂x = ∂b - ∂A x."""
  # A x - b = 0
  # ∂A x + A ∂x - ∂b = 0
  # ∂x = A^{-1} (∂b - ∂A x)

  kwargs = dict(const_lengths=const_lengths, jaxprs=jaxprs)
  x = linear_solve_p.bind(*primals, **kwargs)

  params, _ = _split_linear_solve_args(primals, const_lengths)
  params_dot, b_dot = _split_linear_solve_args(tangents, const_lengths)

  num_x_leaves = len(b_dot)
  # x is a flat tree with possible aux values appended
  # since x_tree == b_tree == b_dot_tree, we can cut off
  # aux values with len info provided by b_dot tree here
  x_leaves, _ = split_list(x, [num_x_leaves])

  if all(type(p) is ad_util.Zero for p in params_dot.matvec):
    # no need to evaluate matvec_tangents
    rhs = b_dot
  else:
    matvec_tangents = _tangent_linear_map(
        core.jaxpr_as_fun(jaxprs.matvec), params.matvec, params_dot.matvec, *x_leaves)
    rhs = _map(ad.add_tangents, b_dot, _map(operator.neg, matvec_tangents))

  x_dot = linear_solve_p.bind(*(_flatten(params) + rhs), **kwargs)

  # split into x tangents and aux tangents (these become zero)
  dx_leaves, daux_leaves = split_list(x_dot, [num_x_leaves])
  daux_leaves = _map(ad_util.Zero.from_value, daux_leaves)

  x_dot = dx_leaves + daux_leaves

  return x, x_dot
def _linear_solve_transpose_rule(cotangent, *primals, const_lengths, jaxprs):
  """Transpose rule: solve the transposed system using transpose_solve."""
  if jaxprs.transpose_solve is None:
    raise TypeError('transpose_solve required for backwards mode automatic '
                    'differentiation of custom_linear_solve')

  params, b = _split_linear_solve_args(primals, const_lengths)
  # split off symbolic zeros in the cotangent if present
  x_cotangent, _ = split_list(cotangent, [len(b)])
  assert all(ad.is_undefined_primal(x) for x in b)
  cotangent_b_full = linear_solve_p.bind(
      *(_flatten(params.transpose()) + x_cotangent),
      const_lengths=const_lengths.transpose(), jaxprs=jaxprs.transpose())
  # drop aux values in cotangent computation
  cotangent_b, _ = split_list(cotangent_b_full, [len(b)])
  # No cotangents for the constant (non-b) arguments.
  return [None] * sum(const_lengths) + cotangent_b
def _linear_solve_batching_rule(args, dims, axis_name, main_type, const_lengths, jaxprs):
  """Batching rule for linear_solve_p: batch all four jaxprs consistently."""
  orig_bat = [d is not batching.not_mapped for d in dims]

  # All batched args must share a single batch size.
  size, = {
      a.shape[d] for a, d in zip(args, dims) if d is not batching.not_mapped
  }

  params, b = _split_linear_solve_args(args, const_lengths)
  params_dims, b_dims = _split_linear_solve_args(dims, const_lengths)
  params_bat, orig_b_bat = _split_linear_solve_args(orig_bat, const_lengths)

  (matvec, vecmat, solve, solve_t) = jaxprs
  (matvec_bat, vecmat_bat, solve_bat, solve_t_bat) = params_bat

  num_aux = len(solve.out_avals) - len(matvec.out_avals)
  # Fixpoint computation of which parts of x and b are batched; we need to
  # ensure this is consistent between all four jaxprs
  b_bat = orig_b_bat
  x_bat = [False] * len(solve.out_avals)
  # The loop bound guarantees termination: each pass can only flip entries
  # from unbatched to batched, and there are at most len(b)+len(x) entries.
  for i in range(1 + len(orig_b_bat) + len(solve.out_avals)):
    # Apply vecmat and solve -> new batched parts of x
    solve_jaxpr_batched, solve_x_bat = batching.batch_jaxpr(
        solve, size, solve_bat + b_bat, instantiate=x_bat,
        axis_name=axis_name, main_type=main_type)
    if vecmat is None:
      vecmat_jaxpr_batched = None
      x_bat_out = solve_x_bat
    else:
      vecmat_jaxpr_batched, vecmat_x_bat = batching.batch_jaxpr(
          vecmat, size, vecmat_bat + b_bat, instantiate=x_bat,
          axis_name=axis_name, main_type=main_type)
      # batch all aux data by default
      x_bat_out = _map(operator.or_, vecmat_x_bat + [True] * num_aux, solve_x_bat)

    # Apply matvec and solve_t -> new batched parts of b
    matvec_jaxpr_batched, matvec_b_bat = batching.batch_jaxpr(
        matvec, size, matvec_bat + x_bat_out, instantiate=b_bat,
        axis_name=axis_name, main_type=main_type)
    if solve_t is None:
      solve_t_jaxpr_batched = None
      b_bat_out = _map(operator.or_, matvec_b_bat, orig_b_bat)
    else:
      solve_t_jaxpr_batched, solve_t_b_bat = batching.batch_jaxpr(
          solve_t, size, solve_t_bat + x_bat_out, instantiate=b_bat,
          axis_name=axis_name, main_type=main_type)
      b_bat_out = _map(lambda m, s, o: m or s or o, matvec_b_bat, solve_t_b_bat,
                       orig_b_bat)
    if x_bat_out == x_bat and b_bat_out == b_bat:
      break
    else:
      x_bat = x_bat_out
      b_bat = b_bat_out
  else:
    assert False, "Fixedpoint not reached"

  batched_jaxprs = _LinearSolveTuple(matvec_jaxpr_batched, vecmat_jaxpr_batched,
                                     solve_jaxpr_batched, solve_t_jaxpr_batched)

  # Move batched axes to the front
  new_params = [
      batching.moveaxis(x, d, 0)
      if d is not batching.not_mapped and d != 0 else x
      for x, d in zip(_flatten(params), _flatten(params_dims))
  ]

  # Broadcast out b if necessary
  new_b = [
      batching.broadcast(x, size, 0) if now_bat and not was_bat else
      batching.moveaxis(x, d, 0) if now_bat and d != 0 else x
      for x, d, was_bat, now_bat in zip(b, b_dims, orig_b_bat, b_bat)
  ]

  outs = linear_solve_p.bind(
      *(new_params + new_b),
      const_lengths=const_lengths,
      jaxprs=batched_jaxprs)
  out_dims = [0 if batched else batching.not_mapped for batched in solve_x_bat]
  return outs, out_dims
# Register the custom_linear_solve primitive and all of its rules
# (impl, abstract eval, JVP, lowering, transpose, batching).
linear_solve_p = core.Primitive('custom_linear_solve')
linear_solve_p.multiple_results = True
linear_solve_p.def_impl(_custom_linear_solve_impl)
linear_solve_p.def_abstract_eval(_linear_solve_abstract_eval)
ad.primitive_jvps[linear_solve_p] = _custom_linear_solve_jvp
xla.initial_style_translations[linear_solve_p] = \
    xla.lower_fun_initial_style(_custom_linear_solve_impl)
ad.primitive_transposes[linear_solve_p] = _linear_solve_transpose_rule
batching.initial_style_batchers[linear_solve_p] = _linear_solve_batching_rule
def _interleave(a, b, axis):
  """Given two Tensors of static shape, interleave them along ``axis``.

  ``a`` supplies the even-indexed slices of the result and ``b`` the odd
  ones; ``a`` may have either the same length as ``b`` along ``axis`` or one
  more slice.
  """
  assert a.shape[axis] == b.shape[axis] or a.shape[axis] == b.shape[axis] + 1
  # Pad with interior padding of 1 so a occupies the even slots and b the odd
  # slots, then combine the two padded operands elementwise.
  a_pad = [(0, 0, 0)] * a.ndim
  b_pad = [(0, 0, 0)] * b.ndim
  a_pad[axis] = (0, 1 if a.shape[axis] == b.shape[axis] else 0, 1)
  b_pad[axis] = (1, 0 if a.shape[axis] == b.shape[axis] else 1, 1)
  # bitwise_or acts as the identity-preserving combiner for booleans, add for
  # everything else (the padding value is 0/False).
  op = lax.bitwise_or if a.dtype == np.bool_ else lax.add
  return op(lax.pad(a, lax._const(a, 0), a_pad),
            lax.pad(b, lax._const(b, 0), b_pad))
@api_boundary
def associative_scan(fn: Callable, elems, reverse: bool = False, axis: int = 0):
  """Performs a scan with an associative binary operation, in parallel.

  For an introduction to associative scans, see [BLE1990]_.

  Args:
    fn: A Python callable implementing an associative binary operation with
      signature ``r = fn(a, b)``. Function `fn` must be associative, i.e., it
      must satisfy the equation
      ``fn(a, fn(b, c)) == fn(fn(a, b), c)``.

      The inputs and result are (possibly nested Python tree structures of)
      array(s) matching ``elems``. Each array has a dimension in place
      of the ``axis`` dimension. `fn` should be applied elementwise over
      the ``axis`` dimension (for example, by using :func:`jax.vmap` over the
      elementwise function.)

      The result ``r`` has the same shape (and structure) as the two inputs
      ``a`` and ``b``.
    elems: A (possibly nested Python tree structure of) array(s), each with
      an ``axis`` dimension of size ``num_elems``.
    reverse: A boolean stating if the scan should be reversed with respect to
      the ``axis`` dimension.
    axis: an integer identifying the axis over which the scan should occur.

  Returns:
    A (possibly nested Python tree structure of) array(s) of the same shape
    and structure as ``elems``, in which the ``k``'th element of ``axis`` is the
    result of recursively applying ``fn`` to combine the first ``k`` elements
    of ``elems`` along ``axis``. For example, given ``elems = [a, b, c, ...]``,
    the result would be ``[a, fn(a, b), fn(fn(a, b), c), ...]``.

  Example 1: partial sums of an array of numbers:

  >>> lax.associative_scan(jnp.add, jnp.arange(0, 4))
  DeviceArray([0, 1, 3, 6], dtype=int32)

  Example 2: partial products of an array of matrices

  >>> mats = jax.random.uniform(jax.random.PRNGKey(0), (4, 2, 2))
  >>> partial_prods = lax.associative_scan(jnp.matmul, mats)
  >>> partial_prods.shape
  (4, 2, 2)

  Example 3: reversed partial sums of an array of numbers

  >>> lax.associative_scan(jnp.add, jnp.arange(0, 4), reverse=True)
  DeviceArray([6, 6, 5, 3], dtype=int32)

  .. [BLE1990] Blelloch, Guy E. 1990. "Prefix Sums and Their Applications.",
    Technical Report CMU-CS-90-190, School of Computer Science, Carnegie Mellon
    University.
  """
  elems_flat, tree = tree_flatten(elems)

  # A reverse scan is a forward scan over flipped inputs, flipped back at the
  # end.
  if reverse:
    elems_flat = [lax.rev(elem, [axis]) for elem in elems_flat]

  def combine(a_flat, b_flat):
    # Lower `fn` to operate on flattened sequences of elems.
    a = tree_unflatten(tree, a_flat)
    b = tree_unflatten(tree, b_flat)
    c = fn(a, b)
    c_flat, _ = tree_flatten(c)
    return c_flat

  # Check that all inputs have a consistent leading dimension `num_elems`.
  axis = lax._canonicalize_axis(axis, elems_flat[0].ndim)
  num_elems = int(elems_flat[0].shape[axis])
  if not all(int(elem.shape[axis]) == num_elems for elem in elems_flat[1:]):
    raise ValueError('Array inputs to associative_scan must have the same '
                     'first dimension. (saw: {})'
                     .format([elem.shape for elem in elems_flat]))

  # Summary of algorithm:
  #
  # Consider elements of `_scan(elems)` at odd indices. That's the same as first
  # summing successive pairs of elements of `elems` and performing a scan on
  # that half sized tensor. We perform the latter scan by recursion.
  #
  # Now consider the even elements of `_scan(elems)`. These can be computed
  # from the odd elements of `_scan(elems)` by adding each odd element of
  # `_scan(elems)` to the matching even element in the original `elems`.
  #
  # We return the odd and even elements interleaved.
  #
  # For the base case of the recursion we return the first element
  # of `elems` followed by the sum of the first two elements computed as
  # a (small two-down-to-one) reduction step.
  def _scan(elems):
    """Perform scan on `elems`."""
    num_elems = elems[0].shape[axis]

    if num_elems < 2:
      return elems

    # Combine adjacent pairs of elements.
    reduced_elems = combine(
        [lax.slice_in_dim(elem, 0, -1, stride=2, axis=axis) for elem in elems],
        [lax.slice_in_dim(elem, 1, None, stride=2, axis=axis) for elem in elems])

    # Recursively compute scan for partially reduced tensors.
    odd_elems = _scan(reduced_elems)

    if num_elems % 2 == 0:
      even_elems = combine(
          [lax.slice_in_dim(e, 0, -1, axis=axis) for e in odd_elems],
          [lax.slice_in_dim(e, 2, None, stride=2, axis=axis) for e in elems])
    else:
      even_elems = combine(
          odd_elems,
          [lax.slice_in_dim(e, 2, None, stride=2, axis=axis) for e in elems])

    # The first element of a scan is the same as the first element
    # of the original `elems`.
    even_elems = [
        lax.concatenate([lax.slice_in_dim(elem, 0, 1, axis=axis), result],
                        dimension=axis)
        for (elem, result) in zip(elems, even_elems)]

    return list(_map(partial(_interleave, axis=axis), even_elems, odd_elems))

  scans = _scan(elems_flat)

  if reverse:
    scans = [lax.rev(scanned, [axis]) for scanned in scans]

  return tree_unflatten(tree, scans)
# Cumulative reductions. Each wrapper normalizes its arguments and binds the
# corresponding primitive (defined further down in this module).

def cumsum(operand: Array, axis: int = 0, reverse: bool = False) -> Array:
  """Computes a cumulative sum along `axis`."""
  return cumsum_p.bind(operand, axis=int(axis), reverse=bool(reverse))

def cumprod(operand: Array, axis: int = 0, reverse: bool = False) -> Array:
  """Computes a cumulative product along `axis`."""
  return cumprod_p.bind(operand, axis=int(axis), reverse=bool(reverse))

def cummax(operand: Array, axis: int = 0, reverse: bool = False) -> Array:
  """Computes a cumulative maximum along `axis`."""
  return cummax_p.bind(operand, axis=int(axis), reverse=bool(reverse))

def cummin(operand: Array, axis: int = 0, reverse: bool = False) -> Array:
  """Computes a cumulative minimum along `axis`."""
  return cummin_p.bind(operand, axis=int(axis), reverse=bool(reverse))
def _cumred_shape_rule(x, *, axis: int, reverse: bool):
if axis < 0 or axis >= x.ndim:
raise ValueError(
"axis {} is out of bounds for array of shape {}".format(axis, x.shape))
return x.shape
def _cumsum_transpose_rule(t, operand, *, axis: int, reverse: bool):
  """Transpose of cumsum is a cumsum in the opposite direction (linearity)."""
  return [cumsum(t, axis=axis, reverse=not reverse)]
def _cumred_tpu_translation_rule(window_reduce: Callable, x, *,
axis: int, reverse: bool):
# On TPU, an implementation using reduce_window is handled specially by the
# compiler and is efficient. On other backends, it is O(n^2).
n = x.shape[axis]
if n == 0:
return x
padding = [(0, 0)] * x.ndim
padding[axis] = (0, n - 1) if reverse else (n - 1, 0)
strides = [1] * x.ndim
window_dims = [1] * x.ndim
window_dims[axis] = n
return window_reduce(x, window_dims, strides, padding)
def _cumred_batch_rule(prim, batched_args, batch_dims, *, axis: int,
reverse: bool):
operand, = batched_args
bdim, = batch_dims
axis = axis if axis < bdim else axis + 1
return prim.bind(operand, axis=axis, reverse=reverse), bdim
def _cumred_dtype_rule(name, operand, *args, **kw):
if not dtypes.issubdtype(operand.dtype, np.number):
raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes "
"of number.".format(name, np.dtype(operand.dtype).name))
return dtypes.canonicalize_dtype(operand.dtype)
def _cumulative_reduction_primitive(name,
                                    reduce_fn,
                                    tpu_reduce_window_fn):
  """Create a primitive for a cumulative reduction.

  The generic lowering uses an associative (parallel prefix) scan; on TPU a
  reduce_window-based lowering is substituted instead (see the note in
  _cumred_tpu_translation_rule).
  """
  reducer_p = lax.standard_primitive(
      _cumred_shape_rule, partial(_cumred_dtype_rule, name),
      name,
      translation_rule=xla.lower_fun(
          partial(associative_scan, reduce_fn),
          multiple_results=False))
  xla.backend_specific_translations['tpu'][reducer_p] = xla.lower_fun(
      partial(_cumred_tpu_translation_rule, tpu_reduce_window_fn),
      multiple_results=False)
  batching.primitive_batchers[reducer_p] = partial(_cumred_batch_rule, reducer_p)
  return reducer_p
# Instantiate the four cumulative-reduction primitives. cumsum is linear in
# its operand, so it gets a transpose rule via deflinear2; the other three
# get JVP rules below.
cumsum_p = _cumulative_reduction_primitive("cumsum", lax.add, lax._reduce_window_sum)
ad.deflinear2(cumsum_p, _cumsum_transpose_rule)
cumprod_p = _cumulative_reduction_primitive("cumprod", lax.mul, lax._reduce_window_prod)
cummax_p = _cumulative_reduction_primitive("cummax", lax.max, lax._reduce_window_max)
cummin_p = _cumulative_reduction_primitive("cummin", lax.min, lax._reduce_window_min)
def _cumulative_jvp_rule(primals, tangents, *, axis: int, reverse: bool,
                         combine_fn: Callable):
  """JVP for the nonlinear cumulative reductions (cumprod/cummax/cummin)."""
  # Irrespective of backend, we always use the parallel prefix scan
  # implementation when differentiating because reduce_window is not
  # arbitrarily differentiable.
  return api.jvp(partial(associative_scan, combine_fn, axis=axis,
                         reverse=reverse),
                 primals, tangents)

# cumsum's derivative comes from its linearity (deflinear2); the nonlinear
# reductions differentiate through the associative scan instead.
ad.primitive_jvps[cumprod_p] = partial(_cumulative_jvp_rule, combine_fn=lax.mul)
ad.primitive_jvps[cummin_p] = partial(_cumulative_jvp_rule, combine_fn=lax.min)
ad.primitive_jvps[cummax_p] = partial(_cumulative_jvp_rule, combine_fn=lax.max)
| 41.88843 | 114 | 0.698916 |
258427bece34d944a7764f3f20d42bcaf4c45523 | 205 | py | Python | 2019/day1/day1p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | null | null | null | 2019/day1/day1p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | 2 | 2019-12-01T20:03:18.000Z | 2021-05-11T22:41:00.000Z | 2019/day1/day1p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | null | null | null | file = open('./input')
def findFuel(mass):
    """Total fuel to launch a module of `mass`, counting fuel for the fuel.

    Repeatedly applies mass // 3 - 2 until the requirement drops to zero or
    below, summing the intermediate amounts (iterative form of the original
    recursion).
    """
    total = 0
    fuel = mass // 3 - 2
    while fuel > 0:
        total += fuel
        fuel = fuel // 3 - 2
    return total
# Accumulate the fuel requirement for every mass listed in the input file
# (one integer per line). Renamed the accumulator from `sum` to `total` so
# the builtin sum() is not shadowed.
total = 0
for line in file:
    total += findFuel(int(line))
print(total)
| 12.8125 | 30 | 0.55122 |
00176c24a883ff7f6aeb39eed299e4a096e6b950 | 707 | py | Python | my_env/Lib/site-packages/sklearn/experimental/enable_iterative_imputer.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | [
"MIT"
] | 2 | 2021-05-02T07:59:56.000Z | 2021-12-14T19:53:13.000Z | Web application/env/Lib/site-packages/sklearn/experimental/enable_iterative_imputer.py | arpit0891/Covid-19-and-Pneumonia-detection-from-X-Ray | 6b2756e4672ab25083a0a50f44f36bec1833e789 | [
"MIT"
] | 7 | 2021-06-08T21:46:24.000Z | 2022-03-12T00:35:31.000Z | my_env/Lib/site-packages/sklearn/experimental/enable_iterative_imputer.py | obulrdy6881/Drowsinss | 61cb9281d7dd22aee282b517e2fbf500f0ff9935 | [
"MIT"
] | 1 | 2021-05-02T07:59:59.000Z | 2021-05-02T07:59:59.000Z | """Enables IterativeImputer
The API and results of this estimator might change without any deprecation
cycle.
Importing this file dynamically sets :class:`sklearn.impute.IterativeImputer`
as an attribute of the impute module::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_iterative_imputer # noqa
>>> # now you can import normally from impute
>>> from sklearn.impute import IterativeImputer
"""
from ..impute._iterative import IterativeImputer
from .. import impute
# Use setattr (rather than a plain attribute assignment) to avoid mypy
# errors when monkeypatching IterativeImputer onto the impute module, and
# advertise it in the module's public API.
setattr(impute, 'IterativeImputer', IterativeImputer)
impute.__all__ += ['IterativeImputer']
| 33.666667 | 78 | 0.755304 |
f07ba3569c6681ee886ae93348a466be5a66df13 | 29,056 | py | Python | networkx/algorithms/threshold.py | kakila/networkx | a7c7e0bd2057acf8066d796e53f6ea1a77be803e | [
"BSD-3-Clause"
] | null | null | null | networkx/algorithms/threshold.py | kakila/networkx | a7c7e0bd2057acf8066d796e53f6ea1a77be803e | [
"BSD-3-Clause"
] | null | null | null | networkx/algorithms/threshold.py | kakila/networkx | a7c7e0bd2057acf8066d796e53f6ea1a77be803e | [
"BSD-3-Clause"
] | null | null | null | """
Threshold Graphs - Creation, manipulation and identification.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
__all__ = ['is_threshold_graph', 'find_threshold_graph']
import random # for swap_d
from math import sqrt
import networkx
def is_threshold_graph(G):
    """
    Returns True if G is a threshold graph.
    """
    degrees = [deg for _, deg in G.degree()]
    return is_threshold_sequence(degrees)
def is_threshold_sequence(degree_sequence):
    """
    Returns True if the sequence is a threshold degree sequence.

    Uses the property that a threshold graph must be constructed by
    adding either dominating or isolated nodes. Thus, it can be
    deconstructed iteratively by removing a node of degree zero or a
    node that connects to the remaining nodes. If this deconstruction
    fails then the sequence is not a threshold sequence.
    """
    ds = sorted(degree_sequence)  # work on a sorted copy
    while ds:
        if ds[0] == 0:
            # isolated node: just drop it
            del ds[0]
        elif ds[-1] == len(ds) - 1:
            # dominating node: drop it and decrement the remaining degrees
            ds.pop()
            ds = [d - 1 for d in ds]
        else:
            # neither isolated nor dominating: not a threshold sequence
            return False
    return True
def creation_sequence(degree_sequence, with_labels=False, compact=False):
    """
    Determines the creation sequence for the given threshold degree sequence.

    The creation sequence is a list of single characters 'd'
    or 'i': 'd' for dominating or 'i' for isolated vertices.
    Dominating vertices are connected to all vertices present when
    added. The first node added is by convention 'd'.
    This list can be converted to a string if desired using "".join(cs)

    If with_labels==True:
    Returns a list of 2-tuples containing the vertex number
    and a character 'd' or 'i' which describes the type of vertex.

    If compact==True:
    Returns the creation sequence in a compact form that is the number
    of 'i's and 'd's alternating.

    Examples:
    [1,2,2,3] represents d,i,i,d,d,i,i,i
    [3,1,2] represents d,d,d,i,d,d

    Notice that the first number is the first vertex to be used for
    construction and so is always 'd'.

    with_labels and compact cannot both be True.

    Returns None if the sequence is not a threshold sequence.
    """
    if with_labels and compact:
        raise ValueError("compact sequences cannot be labeled")

    # Work on an indexed copy: [degree, label] pairs sorted by degree.
    if isinstance(degree_sequence, dict):  # labeled degree sequence
        ds = [[degree, label] for (label, degree) in degree_sequence.items()]
    else:
        ds = [[d, i] for i, d in enumerate(degree_sequence)]
    ds.sort()

    cs = []  # creation sequence, built from the last node added backwards
    while ds:
        if ds[0][0] == 0:  # isolated node
            (d, v) = ds.pop(0)
            # the very first node added must be 'd' by convention
            cs.insert(0, (v, 'i') if ds else (v, 'd'))
        elif ds[-1][0] == len(ds) - 1:  # dominating node
            (d, v) = ds.pop()
            cs.insert(0, (v, 'd'))
            ds = [[dd[0] - 1, dd[1]] for dd in ds]  # remove its edges
        else:
            return None  # not a threshold degree sequence

    if with_labels:
        return cs
    if compact:
        return make_compact(cs)
    return [v[1] for v in cs]  # not labeled
def make_compact(creation_sequence):
    """
    Returns the creation sequence in a compact form:
    the lengths of alternating runs of 'd's and 'i's.

    Examples:
    [1,2,2,3] represents d,i,i,d,d,i,i,i.
    [3,1,2] represents d,d,d,i,d,d.

    Notice that the first number is the first vertex
    to be used for construction and so is always 'd'.

    Labeled creation sequences lose their labels in the
    compact representation.
    """
    first = creation_sequence[0]
    if isinstance(first, str):      # plain creation sequence
        cs = creation_sequence[:]
    elif isinstance(first, tuple):  # labeled creation sequence: drop labels
        cs = [s[1] for s in creation_sequence]
    elif isinstance(first, int):    # already compact
        return creation_sequence
    else:
        raise TypeError("Not a valid creation sequence type")

    # Collapse consecutive equal symbols into run lengths.
    ccs = []
    count = 1
    for prev, cur in zip(cs, cs[1:]):
        if cur == prev:
            count += 1
        else:
            ccs.append(count)
            count = 1
    ccs.append(count)  # the final run
    return ccs
def uncompact(creation_sequence):
    """
    Converts a compact creation sequence for a threshold
    graph to a standard creation sequence (unlabeled).
    If the creation_sequence is already standard, return it.
    See creation_sequence.
    """
    first = creation_sequence[0]
    if isinstance(first, (str, tuple)):
        # already a standard (possibly labeled) creation sequence
        return creation_sequence
    if not isinstance(first, int):
        raise TypeError("Not a valid creation sequence type")
    # Even-indexed run lengths expand to 'd's, odd-indexed to 'i's.
    cs = []
    for idx, run_length in enumerate(creation_sequence):
        symbol = 'd' if idx % 2 == 0 else 'i'
        cs.extend(run_length * [symbol])
    return cs
def creation_sequence_to_weights(creation_sequence):
    """
    Returns a list of node weights which create the threshold
    graph designated by the creation sequence. The weights
    are scaled so that the threshold is 1.0. The order of the
    nodes is the same as that in the creation sequence.
    """
    # Turn input sequence into a labeled creation sequence
    first=creation_sequence[0]
    if isinstance(first,str): # creation sequence
        if isinstance(creation_sequence,list):
            wseq = creation_sequence[:]
        else:
            wseq = list(creation_sequence) # string like 'ddidid'
    elif isinstance(first,tuple): # labeled creation sequence
        wseq = [ v[1] for v in creation_sequence]
    elif isinstance(first,int): # compact creation sequence
        wseq = uncompact(creation_sequence)
    else:
        raise TypeError("Not a valid creation sequence type")
    # pass through twice--first backwards
    # Backward pass: every 'i' is replaced (in place) by the current weight
    # w; w is bumped once at each i->d run boundary, so an 'i' node's weight
    # counts the runs of 'd's added after it.
    wseq.reverse()
    w=0
    prev='i'
    for j,s in enumerate(wseq):
        if s=='i':
            wseq[j]=w
            prev=s
        elif prev=='i':
            prev=s
            w+=1
    wseq.reverse() # now pass through forwards
    # Forward pass: every 'd' gets the (larger) running weight; w is bumped
    # at each d->i boundary so dominating nodes outweigh isolated ones.
    for j,s in enumerate(wseq):
        if s=='d':
            wseq[j]=w
            prev=s
        elif prev=='d':
            prev=s
            w+=1
    # Now scale weights
    # If the sequence ended in a run of 'd's, w was never bumped past it;
    # add 1 so all weights fall strictly below the threshold of 1.0.
    if prev=='d': w+=1
    wscale=1./float(w)
    return [ ww*wscale for ww in wseq]
    #return wseq
def weights_to_creation_sequence(weights,threshold=1,with_labels=False,compact=False):
    """
    Returns a creation sequence for a threshold graph
    determined by the weights and threshold given as input.
    If the sum of two node weights is greater than the
    threshold value, an edge is created between these nodes.

    The creation sequence is a list of single characters 'd'
    or 'i': 'd' for dominating or 'i' for isolated vertices.
    Dominating vertices are connected to all vertices present
    when it is added. The first node added is by convention 'd'.

    Parameters
    ----------
    weights : dict or iterable
        Mapping label -> weight, or an iterable of weights (positions
        become the labels).
    threshold : number
        Edge threshold (default 1).
    with_labels : bool
        If True, return (vertex, 'd'/'i') 2-tuples instead.
    compact : bool
        If True, return the run-length compact form (see make_compact).
        with_labels and compact cannot both be True.
    """
    if with_labels and compact:
        raise ValueError("compact sequences cannot be labeled")

    # make an indexed copy so each weight keeps its label while sorting
    if isinstance(weights, dict):  # labeled weights
        wseq = [[w, label] for (label, w) in weights.items()]
    else:
        wseq = [[w, i] for i, w in enumerate(weights)]
    wseq.sort()
    cs = []  # creation sequence, built from the last-added node backward
    cutoff = threshold - wseq[-1][0]
    while wseq:
        if wseq[0][0] < cutoff:  # lightest node reaches no one: isolated
            (w, label) = wseq.pop(0)
            cs.append((label, 'i'))
        else:  # heaviest node connects to all remaining: dominating
            (w, label) = wseq.pop()
            cs.append((label, 'd'))
            # Guard: popping the only remaining node used to raise
            # IndexError here (wseq[-1] on an empty list) for
            # single-weight input.
            if wseq:
                cutoff = threshold - wseq[-1][0]
        if len(wseq) == 1:  # make sure we start with a d
            (w, label) = wseq.pop()
            cs.append((label, 'd'))
    # put in correct order
    cs.reverse()
    if with_labels:
        return cs
    if compact:
        return make_compact(cs)
    return [v[1] for v in cs]  # not labeled
# Manipulating NetworkX.Graphs in context of threshold graphs
def threshold_graph(creation_sequence, create_using=None):
    """
    Create a threshold graph from the creation sequence or compact
    creation_sequence.

    The input sequence can be a plain creation sequence (e.g.
    ['d','i','d','d','d','i']), a labeled creation sequence (e.g.
    [(0,'d'),(2,'d'),(1,'i')]), or a compact creation sequence
    (e.g. [2,1,1,2,0]).

    Use cs=creation_sequence(degree_sequence,labeled=True)
    to convert a degree sequence to a creation sequence.

    Returns None if the sequence is not valid
    """
    # Normalize the input into a list of (vertex, type) pairs.
    head = creation_sequence[0]
    if isinstance(head, str):  # plain creation sequence
        pairs = list(enumerate(creation_sequence))
    elif isinstance(head, tuple):  # labeled creation sequence
        pairs = creation_sequence[:]
    elif isinstance(head, int):  # compact creation sequence
        pairs = list(enumerate(uncompact(creation_sequence)))
    else:
        print("not a valid creation sequence type")
        return None

    if create_using is None:
        G = networkx.Graph()
    elif create_using.is_directed():
        raise networkx.NetworkXError("Directed Graph not supported")
    else:
        G = create_using
        G.clear()
    G.name = "Threshold Graph"

    # 'i' vertices are added bare; 'd' vertices get an edge to every
    # vertex already present.  Snapshot the node list with list(G) first,
    # because adding edges mutates G while we loop over it.
    for vertex, node_type in pairs:
        if node_type == 'd':
            for existing in list(G):
                G.add_edge(vertex, existing)
        G.add_node(vertex)
    return G
def find_alternating_4_cycle(G):
    """
    Returns False if there aren't any alternating 4 cycles.
    Otherwise returns the cycle as [a,b,c,d] where (a,b)
    and (c,d) are edges and (a,c) and (b,d) are not.
    """
    # Brute force: for each edge (u, v), look for a non-neighbor w of u
    # whose neighbor x is itself not adjacent to v.
    for u, v in G.edges():
        for w in G.nodes():
            if w == u or G.has_edge(u, w):
                continue
            for x in G.neighbors(w):
                if x != v and not G.has_edge(v, x):
                    return [u, v, w, x]
    return False
def find_threshold_graph(G, create_using=None):
    """
    Return a threshold subgraph that is close to largest in G.
    The threshold graph will contain the largest degree node in G.
    """
    # Derive the labeled creation sequence first, then build the graph.
    cs = find_creation_sequence(G)
    return threshold_graph(cs, create_using)
def find_creation_sequence(G):
    """
    Find a threshold subgraph that is close to largest in G.
    Returns the labeled creation sequence of that threshold graph.
    """
    cs=[]
    # get a local pointer to the working part of the graph
    H=G
    # Peel the graph from the outside in: strip isolated nodes as 'i',
    # take the max-degree node as 'd', then recurse into its neighborhood.
    while H.order()>0:
        # get new degree sequence on subgraph
        dsdict = dict(H.degree())
        ds = [(d, v) for v,d in dsdict.items()]
        ds.sort()
        # Update threshold graph nodes
        if ds[-1][0]==0: # all are isolated
            # everything left is edgeless; one node still becomes the
            # conventional leading 'd' of the sequence
            cs.extend(zip(dsdict, ['i'] * (len(ds) - 1) + ['d']))
            break # Done!
        # pull off isolated nodes
        while ds[0][0]==0:
            (d,iso)=ds.pop(0)
            cs.append((iso,'i'))
        # find new biggest node
        (d,bigv)=ds.pop()
        # add edges of star to t_g
        cs.append((bigv,'d'))
        # form subgraph of neighbors of big node
        H=H.subgraph(H.neighbors(bigv))
    # The sequence was accumulated outermost-first; creation order is the
    # reverse.
    cs.reverse()
    return cs
### Properties of Threshold Graphs
def triangles(creation_sequence):
    """
    Compute number of triangles in the threshold graph with the
    given creation sequence.

    Shortcut algorithm that doesn't require computing the number of
    triangles at each node: the 'd' nodes form a clique, and every 'i'
    node completes a triangle with each pair of 'd' nodes added after it.
    Returns an int.
    """
    cs = creation_sequence  # alias
    dr = cs.count("d")  # number of d's in sequence
    # triangles in the clique of the dr dominating nodes;
    # floor division keeps the count an int under Python 3
    ntri = dr * (dr - 1) * (dr - 2) // 6
    # now add (dr choose 2) triangles for every 'i' in sequence, where
    # dr is the number of d's to the right of the current i
    for i, typ in enumerate(cs):
        if typ == "i":
            ntri += dr * (dr - 1) // 2
        else:
            dr -= 1
    return ntri
def triangle_sequence(creation_sequence):
    """
    Return triangle sequence for the given threshold graph creation sequence.
    """
    cs=creation_sequence
    seq=[]
    # Single left-to-right pass; run-length state tracks how many
    # dominating nodes remain to the right and how many triangles the
    # current run shares with earlier nodes.
    dr=cs.count("d") # number of d's to the right of the current pos
    dcur=(dr-1)*(dr-2) // 2 # number of triangles through a node of clique dr
    irun=0 # number of i's in the last run
    drun=0 # number of d's in the last run
    # NOTE: prevsym is first read in the 'i' branch; a valid creation
    # sequence starts with 'd', which assigns prevsym on iteration 0.
    for i,sym in enumerate(cs):
        if sym=="d":
            drun+=1
            tri=dcur+(dr-1)*irun # new triangles at this d
        else: # cs[i]="i":
            if prevsym=="d": # new string of i's
                dcur+=(dr-1)*irun # accumulate shared shortest paths
                irun=0 # reset i run counter
                dr-=drun # reduce number of d's to right
                drun=0 # reset d run counter
            irun+=1
            tri=dr*(dr-1) // 2 # new triangles at this i
        seq.append(tri)
        prevsym=sym
    return seq
def cluster_sequence(creation_sequence):
    """
    Return cluster sequence for the given threshold graph creation sequence.
    """
    # Clustering coefficient per node: triangles through the node divided
    # by the number of possible pairs among its neighbors.
    cseq = []
    tri_seq = triangle_sequence(creation_sequence)
    deg_seq = degree_sequence(creation_sequence)
    for tri, deg in zip(tri_seq, deg_seq):
        if deg <= 1:  # isolated vertex or single pair gets cc 0
            cseq.append(0)
        else:
            possible_pairs = (deg * (deg - 1)) // 2
            cseq.append(float(tri) / float(possible_pairs))
    return cseq
def degree_sequence(creation_sequence):
    """
    Return degree sequence for the threshold graph with the given
    creation sequence
    """
    # A node's degree is the number of 'd's added after it, plus -- if the
    # node is itself a 'd' -- every node added before it.
    seq = []
    d_after = creation_sequence.count("d")
    for pos, sym in enumerate(creation_sequence):
        if sym == "d":
            d_after -= 1  # this 'd' no longer counts as "after"
            seq.append(pos + d_after)
        else:
            seq.append(d_after)
    return seq
def density(creation_sequence):
    """
    Return the density of the graph with this creation_sequence.
    The density is the fraction of possible edges present.
    """
    # The degree sum counts each edge twice; n*(n-1) is twice the maximum
    # possible number of edges, so the ratio is the density directly.
    n = len(creation_sequence)
    twice_edges = sum(degree_sequence(creation_sequence))
    return twice_edges / float(n * (n - 1))
def degree_correlation(creation_sequence):
    """
    Return the degree-degree correlation over all edges.
    """
    # Pearson correlation of endpoint degrees, accumulated edge by edge.
    cs=creation_sequence
    s1=0 # deg_i*deg_j
    s2=0 # deg_i^2+deg_j^2
    s3=0 # deg_i+deg_j
    m=0 # number of edges
    rd=cs.count("d") # number of d nodes to the right
    rdi=[ i for i,sym in enumerate(cs) if sym=="d"] # index of "d"s
    ds=degree_sequence(cs)
    for i,sym in enumerate(cs):
        if sym=="d":
            # internal consistency check: the next 'd' index must be i itself
            if i!=rdi[0]:
                print("Logic error in degree_correlation",i,rdi)
                raise ValueError
            rdi.pop(0)
        # every node is adjacent to exactly the 'd's strictly to its
        # right, so pairing i with each index in rdi enumerates each
        # edge exactly once
        degi=ds[i]
        for dj in rdi:
            degj=ds[dj]
            s1+=degj*degi
            s2+=degi**2+degj**2
            s3+=degi+degj
            m+=1
    denom=(2*m*s2-s3*s3)
    numer=(4*m*s1-s3*s3)
    if denom==0:
        # degenerate variance (e.g. regular graph): correlation is 1 by
        # convention when the numerator also vanishes
        if numer==0:
            return 1
        raise ValueError("Zero Denominator but Numerator is %s"%numer)
    return numer/float(denom)
def shortest_path(creation_sequence,u,v):
    """
    Find the shortest path between u and v in a
    threshold graph G with the given creation_sequence.

    For an unlabeled creation_sequence, the vertices u and v must be
    integers in (0,len(sequence)) refering to the position of the
    desired vertices in the sequence.  For a labeled creation_sequence,
    u and v are labels of vertices.

    Use cs=creation_sequence(degree_sequence,with_labels=True)
    to convert a degree sequence to a creation sequence.

    Returns a list of vertices from u to v ([u,v] if they are
    neighbors, [u] if u == v), or -1 if v is unreachable from u.
    """
    # Normalize the input into a labeled creation sequence.
    head = creation_sequence[0]
    if isinstance(head, str):  # plain creation sequence
        labeled = list(enumerate(creation_sequence))
    elif isinstance(head, tuple):  # already labeled
        labeled = creation_sequence[:]
    elif isinstance(head, int):  # compact creation sequence
        labeled = list(enumerate(uncompact(creation_sequence)))
    else:
        raise TypeError("Not a valid creation sequence type")
    verts = [pair[0] for pair in labeled]
    if v not in verts:
        raise ValueError("Vertex %s not in graph from creation_sequence"%v)
    if u not in verts:
        raise ValueError("Vertex %s not in graph from creation_sequence"%u)
    # Done checking
    if u == v:
        return [u]
    later = max(verts.index(u), verts.index(v))
    if labeled[later][1] == 'd':
        # The later vertex dominates everything before it: direct edge.
        return [u, v]
    # The later vertex is isolated: route through any dominating vertex
    # added at or after its position (scan from the right).
    for label, kind in reversed(labeled[later:]):
        if kind == 'd':
            return [u, label, v]
    # Only 'i' vertices remain after u, so there is no connection.
    return -1
def shortest_path_length(creation_sequence,i):
    """
    Return the shortest path length from indicated node to
    every other node for the threshold graph with the given
    creation sequence.

    Node is indicated by index i in creation_sequence unless
    creation_sequence is labeled, in which case i is taken to
    be the label of the node.

    Path lengths in threshold graphs are at most 2.
    Length to unreachable nodes is set to -1.
    """
    # Normalize the input into a plain (unlabeled) creation sequence.
    head = creation_sequence[0]
    if isinstance(head, str):  # plain creation sequence (list or string)
        cs = list(creation_sequence)
    elif isinstance(head, tuple):  # labeled: translate label i to a position
        cs = [kind for _, kind in creation_sequence]
        i = [label for label, _ in creation_sequence].index(i)
    elif isinstance(head, int):  # compact creation sequence
        cs = uncompact(creation_sequence)
    else:
        raise TypeError("Not a valid creation sequence type")
    # Compute: start at distance 2 for everyone, then refine.
    n = len(cs)
    lengths = [2] * n
    lengths[i] = 0  # distance to self
    # any 'd' added after i is a direct neighbor
    for j in range(i + 1, n):
        if cs[j] == "d":
            lengths[j] = 1
    # if i itself is a 'd', it is adjacent to everything added before it
    if cs[i] == 'd':
        for j in range(i):
            lengths[j] = 1
    # trailing 'i's were never connected to anything: mark unreachable
    for j in range(n - 1, 0, -1):
        if cs[j] == "d":
            break
        lengths[j] = -1
    return lengths
def betweenness_sequence(creation_sequence,normalized=True):
    """
    Return betweenness for the threshold graph with the given creation
    sequence.  The result is unscaled.  To scale the values
    to the iterval [0,1] divide by (n-1)*(n-2).
    """
    cs=creation_sequence
    seq=[] # betweenness
    lastchar='d' # first node is always a 'd'
    dr=float(cs.count("d")) # number of d's to the right of curren pos
    irun=0 # number of i's in the last run
    drun=0 # number of d's in the last run
    dlast=0.0 # betweenness of last d
    # NOTE: `dlast=b` below reads b before the 'd' branch has run; this is
    # safe only because a valid creation sequence starts with 'd'.
    for i,c in enumerate(cs):
        if c=='d': #cs[i]=="d":
            # betweennees = amt shared with eariler d's and i's
            # + new isolated nodes covered
            # + new paths to all previous nodes
            b=dlast + (irun-1)*irun/dr + 2*irun*(i-drun-irun)/dr
            drun+=1 # update counter
        else: # cs[i]="i":
            if lastchar=='d': # if this is a new run of i's
                dlast=b # accumulate betweenness
                dr-=drun # update number of d's to the right
                drun=0 # reset d counter
                irun=0 # reset i counter
            b=0 # isolated nodes have zero betweenness
            irun+=1 # add another i to the run
        seq.append(float(b))
        lastchar=c
    # normalize by the number of possible shortest paths
    if normalized:
        order=len(cs)
        scale=1.0/((order-1)*(order-2))
        seq=[ s*scale for s in seq ]
    return seq
def eigenvectors(creation_sequence):
    """
    Return a 2-tuple of Laplacian eigenvalues and eigenvectors
    for the threshold network with creation_sequence.
    The first value is a list of eigenvalues.
    The second value is a list of eigenvectors.
    The lists are in the same order so corresponding eigenvectors
    and eigenvalues are in the same position in the two lists.

    Notice that the order of the eigenvalues returned by eigenvalues(cs)
    may not correspond to the order of these eigenvectors.
    """
    ccs=make_compact(creation_sequence)
    N=sum(ccs)
    vec=[0]*N
    val=vec[:]
    # get number of type d nodes to the right (all for first node)
    # even slots of the compact form are runs of 'd's
    dr=sum(ccs[::2])
    nn=ccs[0]
    # constant vector: the Laplacian always has eigenvalue 0
    vec[0]=[1./sqrt(N)]*N
    val[0]=0
    e=dr
    dr-=nn
    type_d=True
    i=1
    dd=1
    # eigenvectors supported inside the first run of 'd's: each is a
    # "difference" vector over the first dd+... positions, eigenvalue e
    while dd<nn:
        scale=1./sqrt(dd*dd+i)
        vec[i]=i*[-scale]+[dd*scale]+[0]*(N-i-1)
        val[i]=e
        i+=1
        dd+=1
    if len(ccs)==1: return (val,vec)
    for nn in ccs[1:]:
        # "step" vector separating the first i nodes from this run of nn
        scale=1./sqrt(nn*i*(i+nn))
        vec[i]=i*[-nn*scale]+nn*[i*scale]+[0]*(N-i-nn)
        # find eigenvalue
        type_d=not type_d
        if type_d:
            e=i+dr
            dr-=nn
        else:
            e=dr
        val[i]=e
        st=i
        i+=1
        dd=1
        # remaining eigenvectors within this run share the same eigenvalue
        while dd<nn:
            scale=1./sqrt(i-st+dd*dd)
            vec[i]=[0]*st+(i-st)*[-scale]+[dd*scale]+[0]*(N-i-1)
            val[i]=e
            i+=1
            dd+=1
    return (val,vec)
def spectral_projection(u,eigenpairs):
    """
    Returns the coefficients of each eigenvector
    in a projection of the vector u onto the normalized
    eigenvectors which are contained in eigenpairs.

    eigenpairs should be a list of two objects.  The
    first is a list of eigenvalues and the second a list
    of eigenvectors.  The eigenvectors should be lists.

    There's not a lot of error checking on lengths of
    arrays, etc. so be careful.
    """
    # One coefficient per eigenvector: the dot product <eigvec, u>.
    eigvecs = eigenpairs[1]
    return [sum(component * uc for component, uc in zip(vec, u)) for vec in eigvecs]
def eigenvalues(creation_sequence):
    """
    Return sequence of eigenvalues of the Laplacian of the threshold
    graph for the given creation_sequence.

    Based on the Ferrer's diagram method.  The spectrum is integral
    and is the conjugate of the degree sequence.

    See::

      @Article{degree-merris-1994,
       author = {Russel Merris},
       title = {Degree maximal graphs are Laplacian integral},
       journal = {Linear Algebra Appl.},
       year = {1994},
       volume = {199},
       pages = {381--389},
      }

    """
    # The spectrum is the conjugate partition of the sorted degree
    # sequence: walk the Ferrers diagram, emitting a column height (eig)
    # per row once the remaining degrees no longer span that row.
    degseq=degree_sequence(creation_sequence)
    degseq.sort()
    eiglist=[] # zero is always one eigenvalue
    eig=0
    row=len(degseq)
    bigdeg=degseq.pop()
    while row:
        if bigdeg<row: # current degree does not reach this row: emit height
            eiglist.append(eig)
            row-=1
        else: # degree spans the row: grow the column height
            eig+=1
            if degseq:
                bigdeg=degseq.pop()
            else:
                bigdeg=0
    return eiglist
### Threshold graph creation routines
def random_threshold_sequence(n,p,seed=None):
    """
    Create a random threshold sequence of size n.

    Each position after the first is 'd' with probability p and 'i'
    with probability 1-p; the sequence always starts with 'd'.  E.g.::

        s = nx.random_threshold_sequence(10, 0.5)
        G = nx.threshold_graph(s)

    Raises ValueError if p is outside [0,1].
    """
    if seed is not None:
        random.seed(seed)
    if not 0 <= p <= 1:
        raise ValueError("p must be in [0,1]")
    cs = ['d']  # threshold sequences always start with a d
    cs.extend('d' if random.random() < p else 'i' for _ in range(1, n))
    return cs
# maybe *_d_threshold_sequence routines should
# be (or be called from) a single routine with a more descriptive name
# and a keyword parameter?
def right_d_threshold_sequence(n,m):
    """
    Create a skewed threshold graph with a given number
    of vertices (n) and a given number of edges (m), placing
    dominating nodes toward the right end of the sequence.

    Returns an unlabeled creation sequence (list of 'd'/'i')
    for the threshold graph.

    Raises
    ------
    ValueError
        If m exceeds the n*(n-1)/2 edges a simple graph can hold.
    """
    cs = ['d'] + ['i'] * (n - 1)  # create sequence with n-1 isolated nodes
    # m < n: not enough edges for a connected graph; make one star
    if m < n:
        cs[m] = 'd'
        return cs
    # too many edges
    if m > n * (n - 1) / 2:
        raise ValueError("Too many edges for this many nodes.")
    # connected case (m > n-1): greedily convert rightmost nodes to 'd'.
    # `total` tracks the edge count so far (renamed from `sum`, which
    # shadowed the builtin).
    ind = n - 1
    total = n - 1
    while total < m:
        cs[ind] = 'd'
        ind -= 1
        total += ind
    # place one final 'd' so the edge count lands exactly on m
    ind = m - (total - ind)
    cs[ind] = 'd'
    return cs
def left_d_threshold_sequence(n,m):
    """
    Create a skewed threshold graph with a given number
    of vertices (n) and a given number of edges (m), placing
    dominating nodes toward the left end of the sequence.

    Returns an unlabeled creation sequence (list of 'd'/'i')
    for the threshold graph.

    Raises
    ------
    ValueError
        If m exceeds the n*(n-1)/2 edges a simple graph can hold.
    """
    cs = ['d'] + ['i'] * (n - 1)  # create sequence with n-1 isolated nodes
    # m < n: not enough edges for a connected graph; make one star
    if m < n:
        cs[m] = 'd'
        return cs
    # too many edges
    if m > n * (n - 1) / 2:
        raise ValueError("Too many edges for this many nodes.")
    # connected case (m > n-1): anchor the last node as 'd', then fill
    # 'd's from the left.  `total` tracks the edge count so far (renamed
    # from `sum`, which shadowed the builtin).
    cs[n - 1] = 'd'
    total = n - 1
    ind = 1
    while total < m:
        cs[ind] = 'd'
        total += ind
        ind += 1
    if total > m:  # overshot: flip one 'd' back, never the first vertex
        cs[total - m] = 'i'
    return cs
def swap_d(cs,p_split=1.0,p_combine=1.0,seed=None):
    """
    Perform a "swap" operation on a threshold sequence.

    The swap preserves the number of nodes and edges
    in the graph for the given sequence.
    The resulting sequence is still a threshold sequence.

    Perform one split and one combine operation on the
    'd's of a creation sequence for a threshold graph.
    This operation maintains the number of nodes and edges
    in the graph, but shifts the edges from node to node
    maintaining the threshold quality of the graph.

    cs is modified in place and also returned.  p_split and p_combine
    are the probabilities of attempting each move; seed reseeds the
    global random generator.
    """
    if not seed is None:
        random.seed(seed)
    # preprocess the creation sequence
    # NOTE(review): enumerate(cs[1:-1]) yields indices shifted one to the
    # left of their position in cs (cs[1] -> 0), yet these indices are
    # later used to index cs directly -- confirm this offset is intended.
    dlist= [ i for (i,node_type) in enumerate(cs[1:-1]) if node_type=='d' ]
    # split: replace one 'd' by two 'd's at positions summing to it
    if random.random()<p_split:
        choice=random.choice(dlist)
        split_to=random.choice(range(choice))
        flip_side=choice-split_to
        # only valid if both target slots are currently 'i' and distinct
        if split_to!=flip_side and cs[split_to]=='i' and cs[flip_side]=='i':
            cs[choice]='i'
            cs[split_to]='d'
            cs[flip_side]='d'
            dlist.remove(choice)
            # don't add or combine may reverse this action
            # dlist.extend([split_to,flip_side])
            # print >>sys.stderr,"split at %s to %s and %s"%(choice,split_to,flip_side)
    # combine: merge two 'd's into one at the position summing them
    if random.random()<p_combine and dlist:
        first_choice= random.choice(dlist)
        second_choice=random.choice(dlist)
        target=first_choice+second_choice
        # abort when the merged slot is out of range, occupied, or degenerate
        if target >= len(cs) or cs[target]=='d' or first_choice==second_choice:
            return cs
        # OK to combine
        cs[first_choice]='i'
        cs[second_choice]='i'
        cs[target]='d'
        # print >>sys.stderr,"combine %s and %s to make %s."%(first_choice,second_choice,target)
    return cs
| 31.894621 | 115 | 0.608033 |
87e9c24c1b9dd8a0d152176d1baaf2d24870446a | 4,910 | py | Python | cogs/wikiScrape.py | LastAeon77/LibraryOfRuinaBot | 87bbec4a796b6973a975e6eee3c2042754d8ae67 | [
"MIT"
] | 1 | 2020-08-26T12:16:22.000Z | 2020-08-26T12:16:22.000Z | cogs/wikiScrape.py | LastAeon77/LibraryOfRuinaBot | 87bbec4a796b6973a975e6eee3c2042754d8ae67 | [
"MIT"
] | null | null | null | cogs/wikiScrape.py | LastAeon77/LibraryOfRuinaBot | 87bbec4a796b6973a975e6eee3c2042754d8ae67 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import discord
from discord.ext import commands
import pandas as pd
class LibraryScrape:
    """Scrape a Library of Ruina wiki page for a given search term.

    The term is matched (case-insensitively) against ./data/wiki.csv to
    find its page type, exposed as ``searchType`` ('Characters',
    'Mechanics' or 'Floors'); the matching method returns
    ``[image_url, summary_text]``.
    """

    def __init__(self, searchTerm):
        # Map the search term to its wiki page type via the local CSV index.
        df = pd.read_csv("./data/wiki.csv")
        row = df.loc[df["SearchTerms"].str.lower() == searchTerm.lower()]
        self.searchType = row.iloc[0]["Type"]
        self.link = "https://library-of-ruina.fandom.com/wiki/" + searchTerm
        source = requests.get(self.link).text
        self.soup = BeautifulSoup(source, "lxml")
        self.imageLink = ""
        self.contents = ""

    def _summary(self):
        """Return the first two body paragraphs (skipping the lead <p>) joined by a newline."""
        paragraphs = self.soup.find_all("p")
        return paragraphs[1].get_text() + "\n" + paragraphs[2].get_text()

    def Characters(self):
        """Character pages: infobox portrait image, two paragraphs, plus the page link."""
        figures = self.soup.find_all("figure", attrs={"class": "pi-item pi-image"})
        images = [figure.find("img") for figure in figures]
        self.imageLink = images[0].attrs["src"]
        # Assign (not +=) so repeated calls don't accumulate text.
        self.contents = self._summary() + "\n" + self.link
        return [self.imageLink, self.contents]

    def Mechanics(self):
        """Mechanics pages: article thumbnail image plus two paragraphs."""
        figures = self.soup.find_all(
            "figure", attrs={"class": "article-thumb tright show-info-icon"}
        )
        images = [figure.find("img") for figure in figures]
        self.imageLink = images[0].attrs["src"]
        self.contents = self._summary()
        return [self.imageLink, self.contents]

    def Floors(self):
        """Floor pages: thumbnail link image (placeholder on failure) plus two paragraphs."""
        try:
            anchors = self.soup.find_all("a", attrs={"class": "image image-thumbnail"})
            self.imageLink = anchors[0]["href"]
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; a missing thumbnail falls back to a
            # placeholder image.
            self.imageLink = "https://i.imgflip.com/j69nf.jpg"
        self.contents = self._summary()
        return [self.imageLink, self.contents]
class LibraryStuff(commands.Cog):
    """Discord cog exposing Library of Ruina wiki lookups via !wiki."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def wiki(self, ctx, arx: str):
        """Searches wiki for topics"""
        name = arx
        embed = discord.Embed()
        embed.color = 3066993
        embed.set_author(name=str(arx))
        link = ""
        content = ""
        try:
            temp = LibraryScrape(name)
            if temp.searchType == "Characters":
                link, content = temp.Characters()
            elif temp.searchType == "Floors":
                link, content = temp.Floors()
            elif temp.searchType == "Mechanics":
                link, content = temp.Mechanics()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # propagate; any scrape or lookup failure falls back to the
            # friendly "not found" embed below.
            link = "https://i.imgflip.com/j69nf.jpg"
            content = "Content not found, either it doesn't"
            content += " exist or Master Last_Aeon was lazy"
            content += "\n If you typed floor, try Floor_of_History"
        embed.set_image(url=link)
        embed.set_author(name=name.capitalize())
        embed.description = content
        await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: register the LibraryStuff cog."""
    cog = LibraryStuff(bot)
    bot.add_cog(cog)
# x = LibraryScrape("key_pages")
# print(x.Mechanics())
# link = "https://library-of-ruina.fandom.com/wiki/Floor_of_History"
# source = requests.get(link).text
# soup = BeautifulSoup(source, "lxml")
# img = soup.find_all("a", attrs={"class": "image image-thumbnail"})
# k = []
# for imgs in img:
# k.append(imgs)
# print(k[0]["href"])
# x = LibraryScrape("Invitations")
# print(x.Mechanics())
# img = soup.find('div', class_=' image-WikiaSiteWrapper')
# #find all stff with figure and class name this
# summary = soup.find_all("figure", attrs={"class":"pi-item pi-image"})
# #print(summary)
# k=[]
# #find image
# for imgs in summary:
# k.append(imgs.find("img"))
# #summary = img.find('div', class_='image-WikiaSiteWrapper')
# #print(k[0].attrs)
# #get the image src from attributes
# src = k[0].attrs['src']
# #print(src)
# #get link
# img_link=src
# img = requests.get(img_link)
# #download
# #with open("cogs/angela.png", "wb") as f:
# #f.write(img.content)
# words = soup.find_all("p")
# temp=[]
# for paragraphs in words:
# temp.append(paragraphs)
# temp[0]
# print(temp[2].get_text())
| 28.71345 | 83 | 0.569654 |
4472399a5142731d50df281c0363aab60b7b92fe | 404 | py | Python | baselines/deepq/__init__.py | qwertyanonym789/anonym_code | d939b907badabbb01759a3677a0605194564ee96 | [
"MIT"
] | 282 | 2018-06-20T23:00:01.000Z | 2022-03-30T06:25:17.000Z | baselines/deepq/__init__.py | qwertyanonym789/anonym_code | d939b907badabbb01759a3677a0605194564ee96 | [
"MIT"
] | 10 | 2019-03-25T08:19:51.000Z | 2021-11-22T12:25:22.000Z | baselines/deepq/__init__.py | qwertyanonym789/anonym_code | d939b907badabbb01759a3677a0605194564ee96 | [
"MIT"
] | 106 | 2018-06-20T23:06:40.000Z | 2022-03-03T05:04:03.000Z | from baselines.deepq import models # noqa
from baselines.deepq.build_graph import build_act, build_train # noqa
from baselines.deepq.simple import learn, load # noqa
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
def wrap_atari_dqn(env):
    """Apply the standard DeepMind Atari preprocessing used for DQN
    (frame stacking and pixel scaling)."""
    from baselines.common.atari_wrappers import wrap_deepmind
    wrapped = wrap_deepmind(env, frame_stack=True, scale=True)
    return wrapped
f4cf823a11daf8fa4dbfb33f871bd4f608bd0841 | 2,052 | py | Python | bindings/python/examples/return_region.py | mmccarty/legion | 30e00fa6016527c4cf60025a461fb7865f8def6b | [
"Apache-2.0"
] | 555 | 2015-01-19T07:50:27.000Z | 2022-03-22T11:35:48.000Z | bindings/python/examples/return_region.py | mmccarty/legion | 30e00fa6016527c4cf60025a461fb7865f8def6b | [
"Apache-2.0"
] | 1,157 | 2015-01-07T18:34:23.000Z | 2022-03-31T19:45:27.000Z | bindings/python/examples/return_region.py | mmccarty/legion | 30e00fa6016527c4cf60025a461fb7865f8def6b | [
"Apache-2.0"
] | 145 | 2015-02-03T02:31:42.000Z | 2022-02-28T12:03:51.000Z | #!/usr/bin/env python3
# Copyright 2021 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pygion
from pygion import task, Region, RW
@task
def make_region():
    """Create a 4x4 region with a float64 field 'x', zero-fill it, and return it."""
    # If you return a region from a task, the privileges to the region
    # will be automatically given to the calling task.
    R = Region([4, 4], {'x': pygion.float64})
    pygion.fill(R, 'x', 0)
    print('returning from make_region with', R)
    return R
@task
def make_region_dict():
    """Return a dict containing a freshly created, zero-filled 4x4 region."""
    # It should also work if the region in question is returned as
    # part of a larger data structure.
    R = Region([4, 4], {'x': pygion.float64})
    pygion.fill(R, 'x', 0)
    result = {'asdf': R}
    print('returning from make_region_dict with', result)
    return result
@task(privileges=[RW])
def use_region(R):
    """Overwrite field 'x' of region R with 123 (requires read-write privilege)."""
    print('in use_region with', R)
    R.x.fill(123)
@task(privileges=[RW])
def pass_region_nested(R, depth):
    """Recurse `depth` times passing R down, then fill R.x with 456 and return R."""
    # Passing and return region arguments also works, including to
    # recursive subtasks.
    if depth > 0:
        return pass_region_nested(R, depth-1).get()
    R.x.fill(456)
    return R
@task
def main():
    """Exercise region return/pass-through: build regions in subtasks, use them, and verify identity."""
    R = make_region().get()
    use_region(R)
    print('in main with', R)
    R.x.fill(1)
    R2 = make_region_dict().get()['asdf']
    use_region(R2)
    print('in main with', R2)
    R2.x.fill(124)
    R_copy = pass_region_nested(R, 5).get()
    # Check that this is the same region.
    assert R.handle[0].tree_id == R_copy.handle[0].tree_id
# Script entry point: launch the top-level pygion task.
if __name__ == '__main__':
    main()
| 27 | 74 | 0.681287 |
795af779c8632aff242bcdccdb44123e7a86ed56 | 14,133 | py | Python | tests/test_minimalism.py | georgesdimitrov/arvo | 86d33afc3f45d1f2e6f22aded8c2e2b12bc5db7d | [
"MIT"
] | 11 | 2021-02-24T20:05:24.000Z | 2022-03-13T14:27:04.000Z | tests/test_minimalism.py | georgesdimitrov/arvo | 86d33afc3f45d1f2e6f22aded8c2e2b12bc5db7d | [
"MIT"
] | null | null | null | tests/test_minimalism.py | georgesdimitrov/arvo | 86d33afc3f45d1f2e6f22aded8c2e2b12bc5db7d | [
"MIT"
] | 3 | 2021-09-24T02:26:16.000Z | 2022-03-22T12:34:36.000Z | import pytest
from music21 import converter
from arvo import minimalism
from arvo import sequences
@pytest.fixture
def example_stream():
    """Fixture: a C-major scale (C3..G4) parsed from tinyNotation."""
    s = converter.parse("tinyNotation: C D E F G A B c d e f g")
    return s
# Additive Process Tests
def test_additive_process(example_stream):
    """Default additive process: the prefix grows by one note per segment."""
    result = minimalism.additive_process(example_stream)
    intended_result = converter.parse(
        """tinyNotation:
        C
        C D
        C D E
        C D E F
        C D E F G
        C D E F G A
        C D E F G A B
        C D E F G A B c
        C D E F G A B c d
        C D E F G A B c d e
        C D E F G A B c d e f
        C D E F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
@pytest.mark.parametrize(
    "direction,intended_result",
    [
        (
            minimalism.Direction.BACKWARD,
            converter.parse(
                """tinyNotation:
                g
                f g
                e f g
                d e f g
                c d e f g
                B c d e f g
                A B c d e f g
                G A B c d e f g
                F G A B c d e f g
                E F G A B c d e f g
                D E F G A B c d e f g
                C D E F G A B c d e f g
                """
            ),
        ),
        (
            minimalism.Direction.INWARD,
            converter.parse(
                """tinyNotation:
                C g
                C D f g
                C D E e f g
                C D E F d e f g
                C D E F G c d e f g
                C D E F G A B c d e f g
                """
            ),
        ),
        (
            minimalism.Direction.OUTWARD,
            converter.parse(
                """tinyNotation:
                A B
                G A B c
                F G A B c d
                E F G A B c d e
                D E F G A B c d e f
                C D E F G A B c d e f g
                """
            ),
        ),
    ],
)
def test_additive_process_direction(example_stream, direction, intended_result):
    """Additive process honors BACKWARD/INWARD/OUTWARD growth directions."""
    result = minimalism.additive_process(example_stream, direction=direction)
    assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_step_value_int(example_stream):
    """An integer step_value grows the prefix by that many notes per segment."""
    result = minimalism.additive_process(example_stream, step_value=2)
    intended_result = converter.parse(
        """tinyNotation:
        C D
        C D E F
        C D E F G A
        C D E F G A B c
        C D E F G A B c d e
        C D E F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_step_value_sequence(example_stream):
    """A step_value list cycles through per-segment increments."""
    result = minimalism.additive_process(example_stream, step_value=[1, 2, 3])
    intended_result = converter.parse(
        """tinyNotation:
        C
        C D E
        C D E F G A
        C D E F G A B
        C D E F G A B c d
        C D E F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_step_value_sequence_absolute(example_stream):
    """StepMode.ABSOLUTE interprets the sequence values as absolute prefix lengths."""
    result = minimalism.additive_process(
        example_stream,
        step_value=sequences.PRIMES,
        step_mode=minimalism.StepMode.ABSOLUTE,
    )
    intended_result = converter.parse(
        """tinyNotation:
        C D
        C D E
        C D E F G
        C D E F G A B
        C D E F G A B c d e f
        C D E F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_step_value_sequence_absolute_infinite_loop(example_stream):
    """A finite absolute step sequence ends the process when exhausted (no infinite loop)."""
    result = minimalism.additive_process(
        example_stream, step_value=[1, 2, 3], step_mode=minimalism.StepMode.ABSOLUTE
    )
    intended_result = converter.parse(
        """tinyNotation:
        C
        C D
        C D E
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_repetitions_int(example_stream):
    """repetitions=2 repeats every segment twice."""
    result = minimalism.additive_process(example_stream, repetitions=2)
    intended_result = converter.parse(
        """tinyNotation:
        C
        C
        C D
        C D
        C D E
        C D E
        C D E F
        C D E F
        C D E F G
        C D E F G
        C D E F G A
        C D E F G A
        C D E F G A B
        C D E F G A B
        C D E F G A B c
        C D E F G A B c
        C D E F G A B c d
        C D E F G A B c d
        C D E F G A B c d e
        C D E F G A B c d e
        C D E F G A B c d e f
        C D E F G A B c d e f
        C D E F G A B c d e f g
        C D E F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_repetitions_sequence(example_stream):
    """A repetitions list cycles through per-segment repeat counts."""
    result = minimalism.additive_process(example_stream, repetitions=[1, 2, 3])
    intended_result = converter.parse(
        """tinyNotation:
        C
        C D
        C D
        C D E
        C D E
        C D E
        C D E F
        C D E F G
        C D E F G
        C D E F G A
        C D E F G A
        C D E F G A
        C D E F G A B
        C D E F G A B c
        C D E F G A B c
        C D E F G A B c d
        C D E F G A B c d
        C D E F G A B c d
        C D E F G A B c d e
        C D E F G A B c d e f
        C D E F G A B c d e f
        C D E F G A B c d e f g
        C D E F G A B c d e f g
        C D E F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_iterations_start(example_stream):
    """iterations_start skips the first segments of the process."""
    result = minimalism.additive_process(example_stream, iterations_start=3)
    intended_result = converter.parse(
        """tinyNotation:
        C D E
        C D E F
        C D E F G
        C D E F G A
        C D E F G A B
        C D E F G A B c
        C D E F G A B c d
        C D E F G A B c d e
        C D E F G A B c d e f
        C D E F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_iterations_end(example_stream):
result = minimalism.additive_process(example_stream, iterations_end=8)
intended_result = converter.parse(
"""tinyNotation:
C
C D
C D E
C D E F
C D E F G
C D E F G A
C D E F G A B
C D E F G A B c
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_nonlinear(example_stream):
result = minimalism.additive_process(
example_stream,
step_value=sequences.kolakoski(),
step_mode=minimalism.StepMode.ABSOLUTE,
iterations_end=8,
)
intended_result = converter.parse(
"""tinyNotation:
C
C D
C D
C
C
C D
C
C D
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
# Subtractive Process Tests
# Default subtractive process removes one note from the front per iteration
# until nothing is left.
def test_subtractive_process(example_stream):
    result = minimalism.subtractive_process(example_stream)
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        F G A B c d e f g
        G A B c d e f g
        A B c d e f g
        B c d e f g
        c d e f g
        d e f g
        e f g
        f g
        g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# Direction controls which end(s) of the stream notes are removed from:
# BACKWARD trims the tail, INWARD trims both ends toward the middle,
# OUTWARD trims from the middle toward both ends.
@pytest.mark.parametrize(
    "direction,intended_result",
    [
        (
            minimalism.Direction.BACKWARD,
            converter.parse(
                """tinyNotation:
                C D E F G A B c d e f g
                C D E F G A B c d e f
                C D E F G A B c d e
                C D E F G A B c d
                C D E F G A B c
                C D E F G A B
                C D E F G A
                C D E F G
                C D E F
                C D E
                C D
                C
                """
            ),
        ),
        (
            minimalism.Direction.INWARD,
            converter.parse(
                """tinyNotation:
                C D E F G A B c d e f g
                D E F G A B c d e f
                E F G A B c d e
                F G A B c d
                G A B c
                A B
                """
            ),
        ),
        (
            minimalism.Direction.OUTWARD,
            converter.parse(
                """tinyNotation:
                C D E F G A B c d e f g
                C D E F G c d e f g
                C D E F d e f g
                C D E e f g
                C D f g
                C g
                """
            ),
        ),
    ],
)
def test_subtractive_process_direction(example_stream, direction, intended_result):
    result = minimalism.subtractive_process(example_stream, direction=direction)
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# An int step value removes that many notes per iteration.
def test_subtractive_process_step_value_int(example_stream):
    result = minimalism.subtractive_process(example_stream, step_value=2)
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        E F G A B c d e f g
        G A B c d e f g
        B c d e f g
        d e f g
        f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# A sequence of step values is cycled (remove 1, then 2, then 3, ...).
def test_subtractive_process_step_value_sequence(example_stream):
    result = minimalism.subtractive_process(example_stream, step_value=[1, 2, 3])
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        D E F G A B c d e f g
        F G A B c d e f g
        B c d e f g
        c d e f g
        e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# ABSOLUTE step mode: each step value is the total number of notes removed.
def test_subtractive_process_step_value_sequence_absolute(example_stream):
    result = minimalism.subtractive_process(
        example_stream,
        step_value=sequences.PRIMES,
        step_mode=minimalism.StepMode.ABSOLUTE,
    )
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        E F G A B c d e f g
        F G A B c d e f g
        A B c d e f g
        c d e f g
        g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# ABSOLUTE step mode with a finite step-value list terminates once the list
# of absolute removal counts is exhausted (no infinite loop).
def test_subtractive_process_step_value_sequence_absolute_infinite_loop(example_stream):
    result = minimalism.subtractive_process(
        example_stream, step_value=[1, 2, 3], step_mode=minimalism.StepMode.ABSOLUTE
    )
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# An int repetitions value repeats each subtractive segment that many times.
def test_subtractive_process_repetitions_int(example_stream):
    result = minimalism.subtractive_process(example_stream, repetitions=2)
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        C D E F G A B c d e f g
        D E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        E F G A B c d e f g
        F G A B c d e f g
        F G A B c d e f g
        G A B c d e f g
        G A B c d e f g
        A B c d e f g
        A B c d e f g
        B c d e f g
        B c d e f g
        c d e f g
        c d e f g
        d e f g
        d e f g
        e f g
        e f g
        f g
        f g
        g
        g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# A sequence of repetitions is cycled segment by segment (1, 2, 3, ...).
def test_subtractive_process_repetitions_sequence(example_stream):
    result = minimalism.subtractive_process(example_stream, repetitions=[1, 2, 3])
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        D E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        E F G A B c d e f g
        E F G A B c d e f g
        F G A B c d e f g
        G A B c d e f g
        G A B c d e f g
        A B c d e f g
        A B c d e f g
        A B c d e f g
        B c d e f g
        c d e f g
        c d e f g
        d e f g
        d e f g
        d e f g
        e f g
        f g
        f g
        g
        g
        g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# iterations_start skips the first N iterations of the process.
def test_subtractive_process_iterations_start(example_stream):
    result = minimalism.subtractive_process(example_stream, iterations_start=3)
    intended_result = converter.parse(
        """tinyNotation:
        F G A B c d e f g
        G A B c d e f g
        A B c d e f g
        B c d e f g
        c d e f g
        d e f g
        e f g
        f g
        g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# iterations_end truncates the process after N iterations.
def test_subtractive_process_iterations_end(example_stream):
    result = minimalism.subtractive_process(example_stream, iterations_end=8)
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        F G A B c d e f g
        G A B c d e f g
        A B c d e f g
        B c d e f g
        c d e f g
        d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
# A non-monotonic absolute step sequence (Kolakoski) can restore notes as
# well as remove them between iterations.
def test_subtractive_process_nonlinear(example_stream):
    result = minimalism.subtractive_process(
        example_stream,
        step_value=sequences.kolakoski(),
        step_mode=minimalism.StepMode.ABSOLUTE,
        iterations_end=8,
    )
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        E F G A B c d e f g
        D E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        """
    )
    assert list(result.flat.notes) == list(intended_result.flat.notes)
| 25.93211 | 88 | 0.51355 |
92ddd9021e3080cae79d3a084d3079595f9d0f28 | 2,236 | py | Python | drf_spectacular/types.py | Mopsan/drf-spectacular | 5d1280c29994793be517b443fd5d902525c6ab3e | [
"BSD-3-Clause"
] | null | null | null | drf_spectacular/types.py | Mopsan/drf-spectacular | 5d1280c29994793be517b443fd5d902525c6ab3e | [
"BSD-3-Clause"
] | null | null | null | drf_spectacular/types.py | Mopsan/drf-spectacular | 5d1280c29994793be517b443fd5d902525c6ab3e | [
"BSD-3-Clause"
] | null | null | null | import enum
from datetime import datetime, date
from decimal import Decimal
from uuid import UUID
class OpenApiTypes(enum.Enum):
    """
    Basic types known to the OpenApi specification or at least
    common format extension of it.
    - Use BYTE for base64 encoded data wrapped in a string
    - Use BINARY for raw binary data
    - Use OBJECT for arbitrary free-form object (usually a dict)
    """
    # Values are opaque enum.auto() placeholders; the concrete OpenAPI
    # "type"/"format" pair for each member lives in OPENAPI_TYPE_MAPPING.
    FLOAT = enum.auto()
    BOOL = enum.auto()
    STR = enum.auto()
    BYTE = enum.auto()  # base64 encoded
    BINARY = enum.auto()
    INT = enum.auto()
    UUID = enum.auto()
    URI = enum.auto()
    IP4 = enum.auto()
    IP6 = enum.auto()
    HOSTNAME = enum.auto()
    DECIMAL = enum.auto()
    DATETIME = enum.auto()
    DATE = enum.auto()
    EMAIL = enum.auto()
    OBJECT = enum.auto()
    NONE = enum.auto()
# OpenApiTypes member -> OpenAPI schema fragment ("type" plus optional
# "format" / "additionalProperties").
# make a copy with dict() before modifying returned dict
OPENAPI_TYPE_MAPPING = {
    OpenApiTypes.FLOAT: {'type': 'number', 'format': 'float'},
    OpenApiTypes.BOOL: {'type': 'boolean'},
    OpenApiTypes.STR: {'type': 'string'},
    OpenApiTypes.BYTE: {'type': 'string', 'format': 'byte'},
    OpenApiTypes.BINARY: {'type': 'string', 'format': 'binary'},
    OpenApiTypes.INT: {'type': 'integer'},
    OpenApiTypes.UUID: {'type': 'string', 'format': 'uuid'},
    OpenApiTypes.URI: {'type': 'string', 'format': 'uri'},
    OpenApiTypes.IP4: {'type': 'string', 'format': 'ipv4'},
    OpenApiTypes.IP6: {'type': 'string', 'format': 'ipv6'},
    OpenApiTypes.HOSTNAME: {'type': 'string', 'format': 'hostname'},
    OpenApiTypes.DECIMAL: {'type': 'number', 'format': 'double'},
    OpenApiTypes.DATETIME: {'type': 'string', 'format': 'date-time'},
    OpenApiTypes.DATE: {'type': 'string', 'format': 'date'},
    OpenApiTypes.EMAIL: {'type': 'string', 'format': 'email'},
    OpenApiTypes.OBJECT: {'type': 'object', 'additionalProperties': {}},
    OpenApiTypes.NONE: {},
}
# Native Python type -> closest OpenApiTypes member, for deriving a schema
# from a plain Python value or annotation.
PYTHON_TYPE_MAPPING = {
    str: OpenApiTypes.STR,
    float: OpenApiTypes.FLOAT,
    bool: OpenApiTypes.BOOL,
    bytes: OpenApiTypes.BINARY,
    int: OpenApiTypes.INT,
    UUID: OpenApiTypes.UUID,
    Decimal: OpenApiTypes.DECIMAL,
    datetime: OpenApiTypes.DATETIME,
    date: OpenApiTypes.DATE,
    dict: OpenApiTypes.OBJECT,
}
| 32.405797 | 72 | 0.636852 |
d34f9f5fc37012d32625baa62f9b895061d2172d | 2,696 | py | Python | tbx/core/migrations/0015_auto_20160218_1201.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 103 | 2015-02-24T17:58:21.000Z | 2022-03-23T08:08:58.000Z | tbx/core/migrations/0015_auto_20160218_1201.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 145 | 2015-01-13T17:13:43.000Z | 2022-03-29T12:56:20.000Z | tbx/core/migrations/0015_auto_20160218_1201.py | elviva404/wagtail-torchbox | 718d9e2c4337073f010296932d369c726a01dbd3 | [
"MIT"
] | 57 | 2015-01-03T12:00:37.000Z | 2022-02-09T13:11:30.000Z | # -*- coding: utf-8 -*-
from django.db import models, migrations
import wagtail.images.models
import django.db.models.deletion
from django.conf import settings
import taggit.managers
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds TorchboxImage.file_size and
    # re-declares the existing image fields (verbose names, upload handler,
    # tag manager, on_delete behavior) to match the current Wagtail image
    # model definitions. Do not edit the operations by hand.
    dependencies = [
        ("torchbox", "0014_workpage_show_in_play_menu"),
    ]
    operations = [
        migrations.AddField(
            model_name="torchboximage",
            name="file_size",
            field=models.PositiveIntegerField(null=True, editable=False),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name="torchboximage",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True, verbose_name="created at", db_index=True
            ),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name="torchboximage",
            name="file",
            field=models.ImageField(
                height_field="height",
                upload_to=wagtail.images.models.get_upload_to,
                width_field="width",
                verbose_name="file",
            ),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name="torchboximage",
            name="height",
            field=models.IntegerField(verbose_name="height", editable=False),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name="torchboximage",
            name="tags",
            field=taggit.managers.TaggableManager(
                to="taggit.Tag",
                through="taggit.TaggedItem",
                blank=True,
                help_text=None,
                verbose_name="tags",
            ),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name="torchboximage",
            name="title",
            field=models.CharField(max_length=255, verbose_name="title"),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name="torchboximage",
            name="uploaded_by_user",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.SET_NULL,
                blank=True,
                editable=False,
                to=settings.AUTH_USER_MODEL,
                null=True,
                verbose_name="uploaded by user",
            ),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name="torchboximage",
            name="width",
            field=models.IntegerField(verbose_name="width", editable=False),
            preserve_default=True,
        ),
    ]
| 30.988506 | 77 | 0.546365 |
c7c86252b8187032ffab0bbff534cb8f55761934 | 12,250 | py | Python | thirdweb/core/classes/marketplace_direct.py | nftlabs/nftlabs-sdk-python | ea533142dc0881872b347cd8ce635dc0bfff3153 | [
"Apache-2.0"
] | 30 | 2021-10-31T13:17:58.000Z | 2022-02-04T13:41:13.000Z | thirdweb/core/classes/marketplace_direct.py | nftlabs/nftlabs-sdk-python | ea533142dc0881872b347cd8ce635dc0bfff3153 | [
"Apache-2.0"
] | 36 | 2021-11-03T20:30:38.000Z | 2022-02-14T10:15:40.000Z | thirdweb/core/classes/marketplace_direct.py | nftlabs/nftlabs-sdk-python | ea533142dc0881872b347cd8ce635dc0bfff3153 | [
"Apache-2.0"
] | 10 | 2021-11-10T19:59:41.000Z | 2022-01-21T21:26:55.000Z | from re import M
from typing import Any, Dict, Optional, cast
from eth_utils import is_address
from thirdweb.abi import Marketplace
from thirdweb.abi import IERC165
from thirdweb.abi.ierc1155 import IERC1155
from thirdweb.abi.ierc721 import IERC721
from thirdweb.abi.marketplace import IMarketplaceListingParameters
from thirdweb.common.currency import (
fetch_currency_value,
is_native_token,
normalize_price_value,
set_erc20_allowance,
)
from web3.constants import MAX_INT
from thirdweb.common.error import ListingNotFoundException, WrongListingTypeException
from thirdweb.common.marketplace import (
handle_token_approval,
is_token_approved_for_transfer,
map_offer,
validate_new_listing_param,
)
from thirdweb.common.nft import fetch_token_metadata_for_contract
from thirdweb.constants.contract import INTERFACE_ID_IERC1155, INTERFACE_ID_IERC721
from thirdweb.constants.currency import ZERO_ADDRESS
from thirdweb.core.classes.base_contract import BaseContract
from thirdweb.core.classes.contract_wrapper import ContractWrapper
from thirdweb.core.classes.ipfs_storage import IpfsStorage
from thirdweb.types.currency import Price
from thirdweb.types.marketplace import (
ContractListing,
ContractOffer,
DirectListing,
ListingType,
NewDirectListing,
Offer,
)
from web3.eth import TxReceipt
class MarketplaceDirect(BaseContract[Marketplace]):
    """Interface for direct (non-auction) listings on a thirdweb Marketplace
    contract.

    Prices are normalized through the currency helpers before being sent
    on-chain, and token-transfer/ERC20 approvals are requested automatically
    where required.
    """

    # IPFS-backed storage used to resolve token metadata for listings.
    _storage: IpfsStorage

    def __init__(self, contract_wrapper: ContractWrapper, storage: IpfsStorage):
        super().__init__(contract_wrapper)
        self._storage = storage

    """
    READ FUNCTIONS
    """

    def get_listing(self, listing_id: int) -> DirectListing:
        """
        Get a direct listing from the marketplace by ID

        :param listing_id: The ID of the listing to get
        :return: The listing
        :raises ListingNotFoundException: if no listing exists under this ID
        :raises WrongListingTypeException: if the listing is an auction listing
        """
        raw_listing = self._contract_wrapper._contract_abi.listings.call(listing_id)
        listing = ContractListing(*raw_listing)

        # A zero asset-contract address means the listing slot is empty.
        if listing.asset_contract == ZERO_ADDRESS:
            raise ListingNotFoundException(listing_id)

        if ListingType(listing.listing_type) != ListingType.DIRECT:
            raise WrongListingTypeException(
                listing_id,
                "Auction",
                "Direct",
            )

        return self._map_listing(listing)

    def get_active_offer(self, listing_id: int, address: str) -> Optional[Offer]:
        """
        Get an active offer for a direct listing

        :param listing_id: The ID of the listing to get the offer for
        :param address: The address of the user to get the offer for
        :return: The offer, or None if the address has no active offer
        """
        self._validate_listing(listing_id)
        if not is_address(address):
            raise Exception("Address must be a valid address")

        raw_offers = self._contract_wrapper._contract_abi.offers.call(
            listing_id, address
        )
        offers = ContractOffer(*raw_offers)

        # A zero offeror address means no offer exists for this address.
        if offers.offeror == ZERO_ADDRESS:
            return None

        return map_offer(self._contract_wrapper.get_provider(), listing_id, offers)

    """
    WRITE FUNCTIONS
    """

    def create_listing(self, listing: NewDirectListing) -> int:
        """
        Create a new direct listing

        :param listing: The listing to create
        :return: The ID of the newly created listing
        """
        validate_new_listing_param(listing)

        # Make sure the marketplace is approved to transfer the listed token.
        handle_token_approval(
            self._contract_wrapper.get_provider(),
            self._contract_wrapper.get_signer(),
            self.get_address(),
            listing.asset_contract_address,
            listing.token_id,
            self._contract_wrapper.get_signer_address(),
        )

        normalized_price_per_token = normalize_price_value(
            self._contract_wrapper.get_provider(),
            listing.buyout_price_per_token,
            listing.currency_contract_address,
        )

        receipt = self._contract_wrapper.send_transaction(
            "create_listing",
            [
                IMarketplaceListingParameters(
                    assetContract=listing.asset_contract_address,
                    tokenId=listing.token_id,
                    startTime=listing.start_time_in_seconds,
                    secondsUntilEndTime=listing.listing_duration_in_seconds,
                    quantityToList=listing.quantity,
                    currencyToAccept=listing.currency_contract_address,
                    reservePricePerToken=normalized_price_per_token,
                    buyoutPricePerToken=normalized_price_per_token,
                    # 0 == direct listing (as opposed to auction).
                    listingType=0,
                ),
            ],
        )

        # The new listing ID is emitted in the ListingAdded event.
        events = self._contract_wrapper.get_events("ListingAdded", receipt)
        return cast(Any, events[0].get("args")).get("listingId")

    def make_offer(
        self,
        listing_id: int,
        quantity_desired: int,
        currency_contract_address: str,
        price_per_token: Price,
        expiration_date: int = int(MAX_INT, 0),
    ) -> TxReceipt:
        """
        Make an offer on a direct listing

        :param listing_id: The ID of the listing to make the offer on
        :param quantity_desired: The quantity desired
        :param currency_contract_address: The address of the currency contract
        :param price_per_token: The price per token
        :param expiration_date: Timestamp after which the offer expires
            (defaults to never expiring)
        :return: The transaction receipt
        :raises ListingNotFoundException: if the listing does not exist
        """
        # Offers must be denominated in an ERC20 token, not the native token.
        if is_native_token(currency_contract_address):
            raise Exception(
                "You must use the wrapped native token address when making an offer"
            )

        normalized_price = normalize_price_value(
            self._contract_wrapper.get_provider(),
            price_per_token,
            currency_contract_address,
        )

        # Raises ListingNotFoundException if the listing does not exist.
        self._validate_listing(listing_id)

        overrides: Dict[Any, Any] = {}
        set_erc20_allowance(
            self._contract_wrapper,
            quantity_desired * normalized_price,
            currency_contract_address,
            overrides,
        )

        # TODO: Add OVERRIDES
        return self._contract_wrapper.send_transaction(
            "offer",
            [
                listing_id,
                quantity_desired,
                currency_contract_address,
                normalized_price,
                expiration_date,
            ],
        )

    def accept_offer(self, listing_id: int, address_or_offerror: str) -> TxReceipt:
        """
        Accept a direct listing offer

        :param listing_id: The ID of the listing to accept the offer on
        :param address_or_offerror: The address of the user to accept the offer for
        :return: The transaction receipt
        :raises ListingNotFoundException: if the listing does not exist
        """
        self._validate_listing(listing_id)
        raw_offer = self._contract_wrapper._contract_abi.offers.call(
            listing_id, address_or_offerror
        )
        offer = ContractOffer(*raw_offer)
        return self._contract_wrapper.send_transaction(
            "accept_offer",
            [listing_id, address_or_offerror, offer.currency, offer.price_per_token],
        )

    def buyout_listing(
        self, listing_id: int, quantity_desired: int, receiver: Optional[str] = None
    ) -> TxReceipt:
        """
        Buyout a direct listing by ID

        :param listing_id: The ID of the listing to buyout
        :param quantity_desired: The quantity desired
        :param receiver: The address of the user to receive the tokens
            (defaults to the connected signer)
        :return: The transaction receipt
        :raises ListingNotFoundException: if the listing does not exist
        """
        listing = self._validate_listing(listing_id)

        # The lister may have moved or revoked the asset since listing.
        valid = self._is_still_valid_listing(listing, quantity_desired)
        if not valid:
            raise Exception(
                "The asset on this listing has been moved from the lister's wallet, this listing is now invalid"
            )

        buy_for = receiver if receiver else self._contract_wrapper.get_signer_address()
        overrides: Dict[Any, Any] = {}
        set_erc20_allowance(
            self._contract_wrapper,
            listing.buyout_price * quantity_desired,
            listing.currency_contract_address,
            overrides,
        )

        # TODO: Add OVERRIDES
        return self._contract_wrapper.send_transaction(
            "buy",
            [
                listing_id,
                buy_for,
                quantity_desired,
                listing.currency_contract_address,
                listing.buyout_price * quantity_desired,
            ],
        )

    def update_listing(self, listing: DirectListing) -> TxReceipt:
        """
        Update a direct listing

        :param listing: The listing to update
        :return: The transaction receipt
        """
        return self._contract_wrapper.send_transaction(
            "update_listing",
            [
                listing.id,
                listing.quantity,
                listing.buyout_price,
                listing.buyout_price,
                listing.currency_contract_address,
                listing.start_time_in_seconds,
                listing.seconds_until_end,
            ],
        )

    def cancel_listing(self, listing_id: int) -> TxReceipt:
        """
        Cancel a direct listing

        :param listing_id: The ID of the listing to cancel
        :return: The transaction receipt
        """
        return self._contract_wrapper.send_transaction(
            "cancel_direct_listing", [listing_id]
        )

    """
    INTERNAL FUNCTIONS
    """

    def _validate_listing(self, listing_id: int) -> DirectListing:
        # Fetch a listing, normalizing any failure (including a wrong listing
        # type) to ListingNotFoundException.
        try:
            return self.get_listing(listing_id)
        except Exception:
            raise ListingNotFoundException(listing_id)

    def _map_listing(self, listing: ContractListing) -> DirectListing:
        # Convert the raw on-chain listing tuple into the SDK DirectListing
        # type, resolving currency values and token metadata along the way.
        return DirectListing(
            id=listing.listing_id,
            asset_contract_address=listing.asset_contract,
            token_id=listing.token_id,
            buyout_price=listing.buyout_price_per_token,
            currency_contract_address=listing.currency,
            buyout_currency_value_per_token=fetch_currency_value(
                self._contract_wrapper.get_provider(),
                listing.currency,
                listing.buyout_price_per_token,
            ),
            quantity=listing.quantity,
            start_time_in_seconds=listing.start_time,
            asset=fetch_token_metadata_for_contract(
                listing.asset_contract,
                self._contract_wrapper.get_provider(),
                listing.token_id,
                self._storage,
            ),
            seconds_until_end=listing.end_time,
            seller_address=listing.token_owner,
        )

    def _is_still_valid_listing(
        self, listing: DirectListing, quantity: Optional[int] = None
    ) -> bool:
        # Check that the marketplace can still transfer the asset and that the
        # seller still owns (enough of) it.
        approved = is_token_approved_for_transfer(
            self._contract_wrapper.get_provider(),
            self.get_address(),
            listing.asset_contract_address,
            listing.token_id,
            listing.seller_address,
        )

        if not approved:
            return False

        provider = self._contract_wrapper.get_provider()
        erc165 = IERC165(provider, listing.asset_contract_address)
        is_erc721 = erc165.supports_interface.call(INTERFACE_ID_IERC721)
        is_erc1155 = erc165.supports_interface.call(INTERFACE_ID_IERC1155)

        if is_erc721:
            ierc721 = IERC721(provider, listing.asset_contract_address)
            return (
                ierc721.owner_of.call(listing.token_id).lower()
                == listing.seller_address.lower()
            )
        elif is_erc1155:
            ierc1155 = IERC1155(provider, listing.asset_contract_address)
            balance = ierc1155.balance_of.call(listing.seller_address, listing.token_id)
            # NOTE(review): strict ">" treats a seller holding exactly the
            # requested quantity as invalid — confirm whether ">=" was intended.
            return balance > (quantity if quantity is not None else listing.quantity)
        else:
            return False
174f5f45696778188a3c2256a0cf562a5968cb49 | 18 | py | Python | .venv/lib/python3.8/site-packages/nbclient/_version.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | 6 | 2019-01-23T03:51:31.000Z | 2021-02-15T07:54:39.000Z | .venv/lib/python3.8/site-packages/nbclient/_version.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | 52 | 2019-01-23T10:10:30.000Z | 2021-06-27T10:23:10.000Z | .venv/lib/python3.8/site-packages/nbclient/_version.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | 7 | 2019-01-23T10:06:03.000Z | 2020-10-29T18:38:37.000Z | version = '0.5.4'
| 9 | 17 | 0.555556 |
01176a04b19fd37b8ba708957728e250140875bd | 2,971 | py | Python | src/bioregistry/schema/constants.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | src/bioregistry/schema/constants.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | src/bioregistry/schema/constants.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Schema constants."""
import rdflib.namespace
__all__ = [
    'bioregistry_schema_terms',
    # Namespaces
    'bioregistry_collection',
    'bioregistry_resource',
    'bioregistry_metaresource',
    'bioregistry_schema',
    'orcid',
]
# Term name -> human-readable description for each class and property of the
# Bioregistry RDF schema. The keys also define the closed namespace below.
bioregistry_schema_terms = {
    'Resource': "A type for entries in the Bioregistry's registry.",
    'Registry': "A type for entries in the Bioregistry's metaregistry.",
    'Collection': "A type for entries in the Bioregistry's collections",
    'Mapping': "A type, typically instantiated as a blank node, that connects a given resource to a metaresource"
               " and a metaidentifier using the hasMetaresource and hasMetaidentifier relations.",
    'hasExample': 'An identifier for a resource or metaresource.',
    'isProvider': 'Denotes whether a metaresource is capable of acting as a provider. If so, should be accompanied'
                  ' by a "provider_formatter" relation as well.',
    'isResolver': 'Denotes whether a metaresource is capable of acting as a resolver. If so, should be accompanied'
                  ' by a "resolver_formatter" relation as well.',
    'hasProviderFormatter': "The URL format for a provider that contains $1 for the identifier (or metaidentifier)"
                            " that should be resolved.",
    'hasResolverFormatter': "The URL format for a resolver that contains $1 for the prefix and $2 for the identifier"
                            " that should be resolved.",
    'hasPattern': "The pattern for identifiers in the given resource",
    'hasContactEmail': "The email of the contact person for the given resource",
    'hasDownloadURL': "A download link for the given resource",
    'providesFor': "For resources that do not create their own controlled vocabulary, this relation should be used"
                   " to point to a different resource that it uses. For example, CTD's gene resource provides for"
                   " the NCBI Entres Gene resource.",
    'isDeprecated': "A property whose subject is a resource that denotes if it is still available and usable?"
                    " Currently this is a blanket term for decomissioned, unable to locate, abandoned, etc.",
    'hasMapping': "A property whose subject is a resource and object is a mapping",
    'hasRegistry': "A property whose subject is a mapping and object is a metaresource.",
    'hasMetaidentifier': "A property whose subject is a mapping and object is an identifier string.",
}
# rdflib namespaces under which Bioregistry collections, resources,
# metaresources, schema terms, and ORCID identifiers are minted.
bioregistry_collection = rdflib.namespace.Namespace('https://bioregistry.io/collection/')
bioregistry_resource = rdflib.namespace.Namespace('https://bioregistry.io/registry/')
bioregistry_metaresource = rdflib.namespace.Namespace('https://bioregistry.io/metaregistry/')
bioregistry_schema = rdflib.namespace.ClosedNamespace(
    'https://bioregistry.io/schema/#',
    terms=sorted(bioregistry_schema_terms),
)
orcid = rdflib.namespace.Namespace('https://orcid.org/')
711581c102a78a817dafd2e61356974155dab7f3 | 1,712 | py | Python | app/core/migrations/0001_initial.py | mxuanvan02/recipe-app-api | f037e5650cbf97a583d6654e035baa518328176e | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | mxuanvan02/recipe-app-api | f037e5650cbf97a583d6654e035baa518328176e | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | mxuanvan02/recipe-app-api | f037e5650cbf97a583d6654e035baa518328176e | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-23 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the custom User model with an
    # email-based login (unique email, name, is_active/is_staff flags) plus the
    # standard Django auth group/permission relations. Do not edit by hand.
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.352941 | 266 | 0.639603 |
cc75956a5994bcbc5e2a64fda1299084eca1951b | 20,297 | py | Python | bop_toolkit_lib/renderer_py.py | Alex-B5/bop_toolkit_ab | a9fc94c68a84a0866b88169321048478763900ca | [
"MIT"
] | null | null | null | bop_toolkit_lib/renderer_py.py | Alex-B5/bop_toolkit_ab | a9fc94c68a84a0866b88169321048478763900ca | [
"MIT"
] | null | null | null | bop_toolkit_lib/renderer_py.py | Alex-B5/bop_toolkit_ab | a9fc94c68a84a0866b88169321048478763900ca | [
"MIT"
] | null | null | null | # Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)
# Center for Machine Perception, Czech Technical University in Prague
"""A Python based renderer."""
import os
import numpy as np
from glumpy import app, gloo, gl
from bop_toolkit_lib import inout
from bop_toolkit_lib import misc
from bop_toolkit_lib import renderer
# Set glumpy logging level.
from glumpy.log import log
import logging
log.setLevel(logging.WARNING) # Options: ERROR, WARNING, DEBUG, INFO.
# Set backend (http://glumpy.readthedocs.io/en/latest/api/app-backends.html).
# app.use('glfw') # Options: 'glfw', 'qt5', 'pyside', 'pyglet'.
# RGB vertex shader.
_rgb_vertex_code = """
uniform mat4 u_mv;
uniform mat4 u_nm;
uniform mat4 u_mvp;
uniform vec3 u_light_eye_pos;
attribute vec3 a_position;
attribute vec3 a_normal;
attribute vec3 a_color;
attribute vec2 a_texcoord;
varying vec3 v_color;
varying vec2 v_texcoord;
varying vec3 v_eye_pos;
varying vec3 v_L;
varying vec3 v_normal;
void main() {
gl_Position = u_mvp * vec4(a_position, 1.0);
v_color = a_color;
v_texcoord = a_texcoord;
// The following points/vectors are expressed in the eye coordinates.
v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // Vertex.
v_L = normalize(u_light_eye_pos - v_eye_pos); // Vector to the light.
v_normal = normalize(u_nm * vec4(a_normal, 1.0)).xyz; // Normal vector.
}
"""
# RGB fragment shader - flat shading.
_rgb_fragment_flat_code = """
uniform float u_light_ambient_w;
uniform sampler2D u_texture;
uniform int u_use_texture;
varying vec3 v_color;
varying vec2 v_texcoord;
varying vec3 v_eye_pos;
varying vec3 v_L;
void main() {
// Face normal in eye coords.
vec3 f_normal = normalize(cross(dFdx(v_eye_pos), dFdy(v_eye_pos)));
float light_diffuse_w = max(dot(normalize(v_L), normalize(f_normal)), 0.0);
float light_w = u_light_ambient_w + light_diffuse_w;
if(light_w > 1.0) light_w = 1.0;
if(bool(u_use_texture)) {
gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));
}
else {
gl_FragColor = vec4(light_w * v_color, 1.0);
}
}
"""
# RGB fragment shader - Phong shading.
_rgb_fragment_phong_code = """
uniform float u_light_ambient_w;
uniform sampler2D u_texture;
uniform int u_use_texture;
varying vec3 v_color;
varying vec2 v_texcoord;
varying vec3 v_eye_pos;
varying vec3 v_L;
varying vec3 v_normal;
void main() {
float light_diffuse_w = max(dot(normalize(v_L), normalize(v_normal)), 0.0);
float light_w = u_light_ambient_w + light_diffuse_w;
if(light_w > 1.0) light_w = 1.0;
if(bool(u_use_texture)) {
gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));
}
else {
gl_FragColor = vec4(light_w * v_color, 1.0);
}
}
"""
_rgb_fragment_no_light_code = """
uniform float u_light_ambient_w;
uniform sampler2D u_texture;
uniform int u_use_texture;
varying vec3 v_color;
varying vec2 v_texcoord;
varying vec3 v_eye_pos;
varying vec3 v_L;
void main() {
// Face normal in eye coords.
vec3 f_normal = normalize(cross(dFdx(v_eye_pos), dFdy(v_eye_pos)));
float light_diffuse_w = max(dot(normalize(v_L), normalize(f_normal)), 0.0);
float light_w = u_light_ambient_w + light_diffuse_w;
if(light_w > 1.0) light_w = 1.0;
if(light_w < 1.0) light_w = 1.0; //line added. light is now always 1.0. Could have handled smarter
if(bool(u_use_texture)) {
gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));
}
else {
gl_FragColor = vec4(light_w * v_color, 1.0);
}
}
"""
# Depth vertex shader.
# Ref: https://github.com/julienr/vertex_visibility/blob/master/depth.py
#
# Getting the depth from the depth buffer in OpenGL is doable, see here:
# http://web.archive.org/web/20130416194336/http://olivers.posterous.com/linear-depth-in-glsl-for-real
# http://web.archive.org/web/20130426093607/http://www.songho.ca/opengl/gl_projectionmatrix.html
# http://stackoverflow.com/a/6657284/116067
# but it is difficult to achieve high precision, as explained in this article:
# http://dev.theomader.com/depth-precision/
#
# Once the vertex is in the view coordinates (view * model * v), its depth is
# simply the Z axis. Hence, instead of reading from the depth buffer and undoing
# the projection matrix, we store the Z coord of each vertex in the color
# buffer. OpenGL allows for float32 color buffer components.
_depth_vertex_code = """
uniform mat4 u_mv;
uniform mat4 u_mvp;
attribute vec3 a_position;
attribute vec3 a_color;
varying float v_eye_depth;
void main() {
gl_Position = u_mvp * vec4(a_position, 1.0);
vec3 v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // In eye coords.
// OpenGL Z axis goes out of the screen, so depths are negative
v_eye_depth = -v_eye_pos.z;
}
"""
# Depth fragment shader.
_depth_fragment_code = """
varying float v_eye_depth;
void main() {
gl_FragColor = vec4(v_eye_depth, 0.0, 0.0, 1.0);
}
"""
# Functions to calculate transformation matrices.
# Note that OpenGL expects the matrices to be saved column-wise.
# (Ref: http://www.songho.ca/opengl/gl_transform.html)
def _calc_model_view(model, view):
"""Calculates the model-view matrix.
:param model: 4x4 ndarray with the model matrix.
:param view: 4x4 ndarray with the view matrix.
:return: 4x4 ndarray with the model-view matrix.
"""
return np.dot(model, view)
def _calc_model_view_proj(model, view, proj):
"""Calculates the model-view-projection matrix.
:param model: 4x4 ndarray with the model matrix.
:param view: 4x4 ndarray with the view matrix.
:param proj: 4x4 ndarray with the projection matrix.
:return: 4x4 ndarray with the model-view-projection matrix.
"""
return np.dot(np.dot(model, view), proj)
def _calc_normal_matrix(model, view):
"""Calculates the normal matrix.
Ref: http://www.songho.ca/opengl/gl_normaltransform.html
:param model: 4x4 ndarray with the model matrix.
:param view: 4x4 ndarray with the view matrix.
:return: 4x4 ndarray with the normal matrix.
"""
return np.linalg.inv(np.dot(model, view)).T
def _calc_calib_proj(K, x0, y0, w, h, nc, fc, window_coords='y_down'):
"""Conversion of Hartley-Zisserman intrinsic matrix to OpenGL proj. matrix.
Ref:
1) https://strawlab.org/2011/11/05/augmented-reality-with-OpenGL
2) https://github.com/strawlab/opengl-hz/blob/master/src/calib_test_utils.py
:param K: 3x3 ndarray with the intrinsic camera matrix.
:param x0 The X coordinate of the camera image origin (typically 0).
:param y0: The Y coordinate of the camera image origin (typically 0).
:param w: Image width.
:param h: Image height.
:param nc: Near clipping plane.
:param fc: Far clipping plane.
:param window_coords: 'y_up' or 'y_down'.
:return: 4x4 ndarray with the OpenGL projection matrix.
"""
depth = float(fc - nc)
q = -(fc + nc) / depth
qn = -2 * (fc * nc) / depth
# Draw our images upside down, so that all the pixel-based coordinate
# systems are the same.
if window_coords == 'y_up':
proj = np.array([
[2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0],
[0, -2 * K[1, 1] / h, (-2 * K[1, 2] + h + 2 * y0) / h, 0],
[0, 0, q, qn], # Sets near and far planes (glPerspective).
[0, 0, -1, 0]
])
# Draw the images upright and modify the projection matrix so that OpenGL
# will generate window coords that compensate for the flipped image coords.
else:
assert window_coords == 'y_down'
proj = np.array([
[2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0],
[0, 2 * K[1, 1] / h, (2 * K[1, 2] - h + 2 * y0) / h, 0],
[0, 0, q, qn], # Sets near and far planes (glPerspective).
[0, 0, -1, 0]
])
return proj.T
class RendererPython(renderer.Renderer):
  """A Python based renderer."""

  def __init__(self, width, height, mode='rgb+depth', shading='phong',
               bg_color=(0.0, 0.0, 0.0, 0.0)):
    """Constructor.

    :param width: Width of the rendered image.
    :param height: Height of the rendered image.
    :param mode: Rendering mode ('rgb+depth', 'rgb', 'depth').
    :param shading: Type of shading ('flat', 'phong', 'no_light').
    :param bg_color: Color of the background (R, G, B, A).
    """
    super(RendererPython, self).__init__(width, height)

    self.mode = mode
    self.shading = shading
    self.bg_color = bg_color

    # Indicators whether to render RGB and/or depth image.
    self.render_rgb = self.mode in ['rgb', 'rgb+depth']
    self.render_depth = self.mode in ['depth', 'rgb+depth']

    # Structures to store object models and related info.
    self.models = {}  # Loaded models keyed by object ID.
    self.model_bbox_corners = {}  # 8x3 bounding-box corners per object ID.
    self.model_textures = {}  # Texture image (or None) per object ID.

    # Rendered images (filled by render_object via on_draw).
    self.rgb = None
    self.depth = None

    # Window for rendering (hidden; used only for its GL context).
    self.window = app.Window(visible=False)

    # Per-object vertex and index buffer.
    self.vertex_buffers = {}
    self.index_buffers = {}

    # Per-object OpenGL programs for rendering of RGB and depth images.
    self.rgb_programs = {}
    self.depth_programs = {}

    # The frame buffer object. A float32 color attachment is used so the
    # eye-space depth can be stored with full precision.
    rgb_buf = np.zeros(
      (self.height, self.width, 4), np.float32).view(gloo.TextureFloat2D)
    depth_buf = np.zeros(
      (self.height, self.width), np.float32).view(gloo.DepthTexture)
    self.fbo = gloo.FrameBuffer(color=rgb_buf, depth=depth_buf)

    # Activate the created frame buffer object.
    self.fbo.activate()

  def add_object(self, obj_id, model_path, **kwargs):
    """See base class.

    Loads the PLY model, derives its bounding box, resolves per-vertex
    colors/texture, and builds the GPU buffers and shader programs.
    """
    # Color of the object model (the original color saved with the object model
    # will be used if None).
    surf_color = None
    if 'surf_color' in kwargs:
      surf_color = kwargs['surf_color']

    # Load the object model.
    model = inout.load_ply(model_path)
    self.models[obj_id] = model

    # Calculate the 3D bounding box of the model (will be used to set the near
    # and far clipping plane).
    bb = misc.calc_3d_bbox(
      model['pts'][:, 0], model['pts'][:, 1], model['pts'][:, 2])
    # Enumerate the 8 corners from the (origin, size) representation of bb.
    self.model_bbox_corners[obj_id] = np.array([
      [bb[0], bb[1], bb[2]],
      [bb[0], bb[1], bb[2] + bb[5]],
      [bb[0], bb[1] + bb[4], bb[2]],
      [bb[0], bb[1] + bb[4], bb[2] + bb[5]],
      [bb[0] + bb[3], bb[1], bb[2]],
      [bb[0] + bb[3], bb[1], bb[2] + bb[5]],
      [bb[0] + bb[3], bb[1] + bb[4], bb[2]],
      [bb[0] + bb[3], bb[1] + bb[4], bb[2] + bb[5]],
    ])

    # Set texture/color of vertices.
    self.model_textures[obj_id] = None

    # Use the specified uniform surface color.
    if surf_color is not None:
      colors = np.tile(list(surf_color) + [1.0], [model['pts'].shape[0], 1])

      # Set UV texture coordinates to dummy values.
      texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)

    # Use the model texture.
    elif 'texture_file' in self.models[obj_id].keys():
      model_texture_path = os.path.join(
        os.path.dirname(model_path), self.models[obj_id]['texture_file'])
      model_texture = inout.load_im(model_texture_path)

      # Normalize the texture image to [0, 1].
      if model_texture.max() > 1.0:
        model_texture = model_texture.astype(np.float32) / 255.0
      # Flip vertically to match OpenGL's bottom-up texture origin.
      model_texture = np.flipud(model_texture)
      self.model_textures[obj_id] = model_texture

      # UV texture coordinates.
      texture_uv = model['texture_uv']

      # Set the per-vertex color to dummy values.
      colors = np.zeros((model['pts'].shape[0], 3), np.float32)

    # Use the original model color.
    elif 'colors' in model.keys():
      assert (model['pts'].shape[0] == model['colors'].shape[0])
      colors = model['colors']
      if colors.max() > 1.0:
        colors /= 255.0  # Color values are expected in range [0, 1].

      # Set UV texture coordinates to dummy values.
      texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)

    # Set the model color to gray.
    else:
      colors = np.ones((model['pts'].shape[0], 3), np.float32) * 0.5

      # Set UV texture coordinates to dummy values.
      texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)

    # Set the vertex data (structured array layout must match the shader
    # attributes of the program it is later bound to).
    if self.mode == 'depth':
      vertices_type = [
        ('a_position', np.float32, 3),
        ('a_color', np.float32, colors.shape[1])
      ]
      vertices = np.array(list(zip(model['pts'], colors)), vertices_type)
    else:
      if self.shading == 'flat':
        vertices_type = [
          ('a_position', np.float32, 3),
          ('a_color', np.float32, colors.shape[1]),
          ('a_texcoord', np.float32, 2)
        ]
        vertices = np.array(list(zip(model['pts'], colors, texture_uv)),
                            vertices_type)
      elif self.shading == 'phong':
        # Phong shading additionally needs per-vertex normals.
        vertices_type = [
          ('a_position', np.float32, 3),
          ('a_normal', np.float32, 3),
          ('a_color', np.float32, colors.shape[1]),
          ('a_texcoord', np.float32, 2)
        ]
        vertices = np.array(list(zip(model['pts'], model['normals'],
                                     colors, texture_uv)), vertices_type)
      elif self.shading == 'no_light':
        vertices_type = [
          ('a_position', np.float32, 3),
          ('a_color', np.float32, colors.shape[1]),
          ('a_texcoord', np.float32, 2)
        ]
        vertices = np.array(list(zip(model['pts'], colors, texture_uv)),
                            vertices_type)
      else:
        raise ValueError('Unknown shading type.')

    # Create vertex and index buffer for the loaded object model.
    self.vertex_buffers[obj_id] = vertices.view(gloo.VertexBuffer)
    self.index_buffers[obj_id] = \
      model['faces'].flatten().astype(np.uint32).view(gloo.IndexBuffer)

    # Set shader for the selected shading.
    if self.shading == 'flat':
      rgb_fragment_code = _rgb_fragment_flat_code
    elif self.shading == 'phong':
      rgb_fragment_code = _rgb_fragment_phong_code
    elif self.shading == 'no_light':
      rgb_fragment_code = _rgb_fragment_no_light_code
    else:
      raise ValueError('Unknown shading type.')

    # Prepare the RGB OpenGL program.
    rgb_program = gloo.Program(_rgb_vertex_code, rgb_fragment_code)
    rgb_program.bind(self.vertex_buffers[obj_id])
    if self.model_textures[obj_id] is not None:
      rgb_program['u_use_texture'] = int(True)
      rgb_program['u_texture'] = self.model_textures[obj_id]
    else:
      rgb_program['u_use_texture'] = int(False)
      # A dummy 1x1 texture so the sampler uniform is always valid.
      rgb_program['u_texture'] = np.zeros((1, 1, 4), np.float32)
    self.rgb_programs[obj_id] = rgb_program

    # Prepare the depth OpenGL program.
    depth_program = gloo.Program(_depth_vertex_code,_depth_fragment_code)
    depth_program.bind(self.vertex_buffers[obj_id])
    self.depth_programs[obj_id] = depth_program

  def remove_object(self, obj_id):
    """See base class. Drops all per-object resources for obj_id."""
    del self.models[obj_id]
    del self.model_bbox_corners[obj_id]
    if obj_id in self.model_textures:
      del self.model_textures[obj_id]
    del self.vertex_buffers[obj_id]
    del self.index_buffers[obj_id]
    del self.rgb_programs[obj_id]
    del self.depth_programs[obj_id]

  def render_object(self, obj_id, R, t, fx, fy, cx, cy):
    """See base class.

    Renders object obj_id at pose (R, t) with intrinsics (fx, fy, cx, cy)
    and returns a dict with the requested 'rgb'/'depth' images.
    """
    # Define the following variables as global so their latest values are always
    # seen in function on_draw below.
    global curr_obj_id, mat_model, mat_view, mat_proj
    curr_obj_id = obj_id

    # Model matrix (from object space to world space).
    mat_model = np.eye(4, dtype=np.float32)

    # View matrix (from world space to eye space; transforms also the coordinate
    # system from OpenCV to OpenGL camera space).
    mat_view_cv = np.eye(4, dtype=np.float32)
    mat_view_cv[:3, :3], mat_view_cv[:3, 3] = R, t.squeeze()
    # Flip the Y and Z axes: OpenCV looks down +Z, OpenGL down -Z.
    yz_flip = np.eye(4, dtype=np.float32)
    yz_flip[1, 1], yz_flip[2, 2] = -1, -1
    mat_view = yz_flip.dot(mat_view_cv)  # OpenCV to OpenGL camera system.
    mat_view = mat_view.T  # OpenGL expects column-wise matrix format.

    # Calculate the near and far clipping plane from the 3D bounding box.
    bbox_corners = self.model_bbox_corners[obj_id]
    bbox_corners_ht = np.concatenate(
      (bbox_corners, np.ones((bbox_corners.shape[0], 1))), axis=1).transpose()
    bbox_corners_eye_z = mat_view_cv[2, :].reshape((1, 4)).dot(bbox_corners_ht)
    clip_near = bbox_corners_eye_z.min()
    clip_far = bbox_corners_eye_z.max()

    # Projection matrix.
    K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
    mat_proj = _calc_calib_proj(
      K, 0, 0, self.width, self.height, clip_near, clip_far)

    @self.window.event
    def on_draw(dt):
      self.window.clear()
      global curr_obj_id, mat_model, mat_view, mat_proj

      # Render the RGB image.
      if self.render_rgb:
        self.rgb = self._draw_rgb(
          curr_obj_id, mat_model, mat_view, mat_proj)

      # Render the depth image.
      if self.render_depth:
        self.depth = self._draw_depth(
          curr_obj_id, mat_model, mat_view, mat_proj)

    # The on_draw function is called framecount+1 times.
    app.run(framecount=0)

    # NOTE(review): an unknown mode falls through and returns None here —
    # confirm whether that is intended or should raise.
    if self.mode == 'rgb':
      return {'rgb': self.rgb}
    elif self.mode == 'depth':
      return {'depth': self.depth}
    elif self.mode == 'rgb+depth':
      return {'rgb': self.rgb, 'depth': self.depth}

  def _draw_rgb(self, obj_id, mat_model, mat_view, mat_proj):
    """Renders an RGB image.

    :param obj_id: ID of the object model to render.
    :param mat_model: 4x4 ndarray with the model matrix.
    :param mat_view: 4x4 ndarray with the view matrix.
    :param mat_proj: 4x4 ndarray with the projection matrix.
    :return: HxWx3 ndarray with the rendered RGB image.
    """
    # Update the OpenGL program.
    program = self.rgb_programs[obj_id]
    # light_cam_pos and light_ambient_weight presumably come from the
    # renderer.Renderer base class — confirm against its definition.
    program['u_light_eye_pos'] = list(self.light_cam_pos)
    program['u_light_ambient_w'] = self.light_ambient_weight
    program['u_mv'] = _calc_model_view(mat_model, mat_view)
    program['u_nm'] = _calc_normal_matrix(mat_model, mat_view)
    program['u_mvp'] = _calc_model_view_proj(mat_model, mat_view, mat_proj)

    # OpenGL setup.
    gl.glEnable(gl.GL_DEPTH_TEST)
    gl.glClearColor(
      self.bg_color[0], self.bg_color[1], self.bg_color[2], self.bg_color[3])
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    gl.glViewport(0, 0, self.width, self.height)

    # Keep the back-face culling disabled because of objects which do not have
    # well-defined surface (e.g. the lamp from the lm dataset).
    gl.glDisable(gl.GL_CULL_FACE)

    # Rendering.
    program.draw(gl.GL_TRIANGLES, self.index_buffers[obj_id])

    # Get the content of the FBO texture.
    rgb = np.zeros((self.height, self.width, 4), dtype=np.float32)
    gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, rgb)
    rgb.shape = (self.height, self.width, 4)
    # glReadPixels returns rows bottom-up; flip to top-down image order.
    rgb = rgb[::-1, :]
    rgb = np.round(rgb[:, :, :3] * 255).astype(np.uint8)  # Convert to [0, 255].

    return rgb

  def _draw_depth(self, obj_id, mat_model, mat_view, mat_proj):
    """Renders a depth image.

    :param obj_id: ID of the object model to render.
    :param mat_model: 4x4 ndarray with the model matrix.
    :param mat_view: 4x4 ndarray with the view matrix.
    :param mat_proj: 4x4 ndarray with the projection matrix.
    :return: HxW ndarray with the rendered depth image.
    """
    # Update the OpenGL program.
    program = self.depth_programs[obj_id]
    program['u_mv'] = _calc_model_view(mat_model, mat_view)
    program['u_mvp'] = _calc_model_view_proj(mat_model, mat_view, mat_proj)

    # OpenGL setup.
    gl.glEnable(gl.GL_DEPTH_TEST)
    gl.glClearColor(0.0, 0.0, 0.0, 0.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    gl.glViewport(0, 0, self.width, self.height)

    # Keep the back-face culling disabled because of objects which do not have
    # well-defined surface (e.g. the lamp from the lm dataset).
    gl.glDisable(gl.GL_CULL_FACE)

    # Rendering.
    program.draw(gl.GL_TRIANGLES, self.index_buffers[obj_id])

    # Get the content of the FBO texture.
    depth = np.zeros((self.height, self.width, 4), dtype=np.float32)
    gl.glReadPixels(
      0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, depth)
    depth.shape = (self.height, self.width, 4)
    # glReadPixels returns rows bottom-up; flip to top-down image order.
    depth = depth[::-1, :]
    depth = depth[:, :, 0]  # Depth is saved in the first channel

    return depth
| 34.227656 | 104 | 0.663793 |
f84aae3e00c7ffcc0d05092565deb86e680e3987 | 7,552 | py | Python | rplugin/python3/lfx/main.py | dcampos/nvim-ulf | 9b70096bf04133a33f09691b1ef1ac89f8f29a85 | [
"MIT"
] | 2 | 2020-10-15T09:34:53.000Z | 2020-10-26T10:31:48.000Z | rplugin/python3/lfx/main.py | dcampos/nvim-ulf | 9b70096bf04133a33f09691b1ef1ac89f8f29a85 | [
"MIT"
] | null | null | null | rplugin/python3/lfx/main.py | dcampos/nvim-ulf | 9b70096bf04133a33f09691b1ef1ac89f8f29a85 | [
"MIT"
] | 1 | 2020-10-15T09:35:05.000Z | 2020-10-15T09:35:05.000Z | #!/usr/bin/env python3
from core.types import ClientConfig, Settings
from core.sessions import create_session
from core.protocol import WorkspaceFolder
from core.sessions import Session
from core.protocol import Request, Notification
from core.url import filename_to_uri
from core.logging import set_debug_logging, set_exception_logging, setup_log
import signal
import sys
import time
import shutil
import json
import argparse
import os
# Command line used to start the intelephense PHP language server over stdio.
INTELEPHENSE = ['node',
                '/home/dpc/.npm-global/lib/node_modules/intelephense/lib/intelephense.js',
                '--stdio']
# Command line for the Python language server (resolved from PATH).
PYLS = [shutil.which('pyls')]

# Default file opened when none is given on the command line.
WORKING_FILE = '/home/dpc/sandbox/lfx/rplugin/python3/lfx/core/diff.py'
# WORKING_FILE='/home/dpc/Dev/eproc1g/web/modulos/sandbox/tools/TesteDatas.php'
# WORKING_FILE='/home/dpc/Dev/godot/my-circle-jump/Main.gd'

# Set to True by on_post_initialize once the server has answered initialize.
initialized = False
# Current cursor position as [line, column], both 1-based.
position = [1, 1]

# Language-server client configuration per file extension.
configs = {
    '.php': ClientConfig(
        name='intelephense',
        binary_args=INTELEPHENSE,
        # binary_args=[shutil.which('pyls')],
        tcp_port=None,
        # NOTE(review): languageId is 'python' for the PHP config — looks
        # like a copy/paste slip; confirm whether it should be 'php'.
        languageId='python',
        settings=dict(),
        init_options={
            'clearCache': False,
            'storagePath': '/tmp/foo',
        }),
    '.py': ClientConfig(
        name='pyls',
        binary_args=PYLS,
        tcp_port=None,
        languageId='python',
        settings={},
        init_options={}),
    '.gd': ClientConfig(
        name='godot',
        binary_args=[],
        tcp_port=6008,
        languageId='gdscript3',
        settings={},
        init_options={}),
}
def on_pre_initialize(session):
    """Callback invoked just before the initialize request is sent."""
    print('Pre-initialize called')


def on_post_initialize(session: Session):
    """Callback invoked after the server answered the initialize request.

    Sends the 'initialized' notification, opens the working file on the
    server and hooks up intelephense's indexing progress notifications.
    """
    global initialized
    print('Post-initialize called')
    initialized = True
    print(session.capabilities)
    session.client.send_notification(
        Notification.initialized())
    did_open(working_file)
    session.client.on_notification(
        'indexingStarted', lambda params: print(params))
    session.client.on_notification(
        'indexingEnded', lambda params: print(params))
def find_root(file):
    """Find the workspace root for *file*.

    Walks every ancestor directory of *file* and returns the outermost one
    containing a '.gitmodules' or '.git' entry. If no ancestor contains
    either marker, the file's own directory is returned.
    """
    directory, leaf = os.path.split(file)
    root = directory
    while leaf:
        # Remember this level if it looks like a repository root; keep
        # climbing so the outermost repository wins (submodule support).
        if any(os.path.exists(os.path.join(directory, marker))
               for marker in ('.gitmodules', '.git')):
            root = directory
        directory, leaf = os.path.split(directory)
    return root
# Parse the (optional) file argument; defaults to WORKING_FILE.
parser = argparse.ArgumentParser()
parser.add_argument('file', nargs='?', type=argparse.FileType('r'),
                    default=WORKING_FILE)
args = parser.parse_args()

working_file = os.path.abspath(args.file.name)
file_ext = os.path.splitext(working_file)[1]
root_path = find_root(working_file)
# NOTE(review): dirname() yields the parent path, not the folder name;
# basename() may have been intended for the workspace name — confirm.
root_name = os.path.dirname(root_path)

# Pick the client config that matches the file extension.
for key in configs.keys():
    if key == file_ext:
        config = configs[key]
        break
else:
    raise KeyError('No config found for this file')

workspace = WorkspaceFolder(root_name, root_path)
# workspace = WorkspaceFolder('eproc1g', '/home/dpc/sandbox/sublime-lsp/')

# Verbose logging so the whole LSP traffic is visible on the console.
settings = Settings()
settings.log_stderr = True
settings.log_payloads = True
set_debug_logging(True)
set_exception_logging(True)
setup_log()

# Path of the file currently opened on the server (None until did_open runs).
current_file = None

# Start the language server; the callbacks above drive the handshake.
session = create_session(
    config=config,
    workspace_folders=[workspace],
    env=dict(),
    settings=settings,
    on_post_initialize=on_post_initialize,
    on_pre_initialize=on_pre_initialize,
    on_stderr_log=lambda what: print(what))


def signal_handler(sig, frame):
    """Shut the session down cleanly on Ctrl-C."""
    session.end()


signal.signal(signal.SIGINT, signal_handler)
# Handlers
def request_handler(res):
    """Default response handler: dump the raw result to stdout."""
    print(res)


def error_handler(error):
    """Default error handler: dump the raw error to stdout."""
    print(error)
# Params
def text_document_position_params():
    """Build LSP TextDocumentPositionParams for the current file and cursor.

    The global ``position`` is 1-based while LSP positions are 0-based,
    hence the -1 offsets.
    """
    line, column = position
    return {
        'textDocument': {'uri': filename_to_uri(working_file)},
        'position': {'line': line - 1, 'character': column - 1},
    }
# Util
def print_position():
    """Print the current line of the working file with a caret marking the
    current column (both taken from the 1-based global ``position``)."""
    with open(working_file, 'r', encoding='latin1') as file:
        lines = file.readlines()
    print(lines[position[0] - 1].rstrip())
    print('{}^'.format(' ' * (position[1] - 1)))
# Requests/notifications
def did_close(path):
    """Send textDocument/didClose for *path* to the server."""
    session.client.send_notification(
        Notification.didClose({
            'textDocument': {
                'uri': filename_to_uri(path)
            }
        })
    )
def did_open(path):
    """Open *path* on the language server.

    Closes the previously opened file first (this driver tracks a single
    document), then sends textDocument/didOpen with the file's full
    contents read as latin1.

    :param path: Path of the file to open.
    """
    global current_file
    if current_file:
        did_close(current_file)
    with open(path, 'r', encoding='latin1') as file:
        text = file.read()
    session.client.send_notification(
        Notification.didOpen({
            'textDocument': {
                'uri': filename_to_uri(path),
                # Fix: announce the language id from the active server config
                # instead of the hard-coded 'php', so .py/.gd files are
                # reported correctly to their servers.
                'languageId': config.languageId,
                'version': 1,
                'text': text
            }}
        ))
    current_file = path
def workspace_symbol():
    """Prompt for a query and send a workspace/symbol request."""
    query = input('Symbol: ')
    session.client.execute_request(
        Request.workspaceSymbol({'query': query}),
        request_handler,
        error_handler,
        300)


def document_symbol():
    """Request the symbols of the working file (textDocument/documentSymbol)."""
    session.client.execute_request(
        Request.documentSymbols({
            'textDocument': {
                'uri': filename_to_uri(working_file)
            }
        }),
        request_handler,
        error_handler,
        300)


def hover():
    """Request hover info at the current cursor position."""
    session.client.execute_request(
        Request.hover(text_document_position_params()),
        request_handler,
        error_handler,
        300)


def completion():
    """Request completions at the current cursor position."""
    session.client.execute_request(
        Request.complete(text_document_position_params()),
        lambda res: print(json.dumps(res, indent=4)),
        error_handler,
        300)


def references():
    """Request references to the symbol at the current cursor position."""
    params = text_document_position_params()
    params['context'] = {'includeDeclaration' : False}
    session.client.execute_request(
        Request.references(params),
        lambda res: print(json.dumps(res, indent=4)),
        error_handler,
        10)


def goto_definition():
    """Request the definition location of the symbol under the cursor."""
    session.client.execute_request(
        Request.definition(text_document_position_params()),
        lambda res: print(json.dumps(res, indent=4)),
        error_handler,
        300)


def signature_help():
    """Request signature help at the current cursor position."""
    session.client.execute_request(
        Request.signatureHelp(text_document_position_params()),
        lambda res: print(json.dumps(res, indent=4)),
        error_handler,
        10)


def set_position():
    """Prompt for a 1-based line/column and update the global cursor."""
    line = input('Line: ')
    col = input('Col: ')
    position[0] = int(line)
    position[1] = int(col)
    print('Position set to ', position)
    print_position()


def open_file():
    """Prompt for a file path and open it on the server."""
    file = input('File: ')
    print('Opening file {}...'.format(file))
    did_open(file)


def end():
    """Terminate the session and exit the program."""
    session.end()
    sys.exit(0)
# (key, label, action) triples for the interactive menu below.
menu_items=[
    ['c', 'Completion', completion],
    ['h', 'Hover', hover],
    ['s', 'Document symbols', document_symbol],
    ['w', 'Workspace symbols', workspace_symbol],
    ['d', 'Go to definition', goto_definition],
    ['r', 'Find referenes', references],
    ['S', 'Signature help', signature_help],
    ['p', 'Set position', set_position],
    ['o', 'Open file', open_file],
    ['q', 'Exit', end]]
def show_menu():
    """Print the menu of available commands."""
    print('-' * 70)
    print('Choose an option:')
    # Fix: the index produced by enumerate() was never used; iterate directly.
    for item in menu_items:
        print('({}) {}'.format(item[0], item[1]))
# Simple REPL: poll until the server finished initializing, then repeatedly
# show the menu and dispatch the chosen command.
while True:
    if not initialized:
        time.sleep(1.0)
        continue
    show_menu()
    res = input('---> ')
    for item in menu_items:
        if item[0] == res:
            cmd = item[2]
            cmd()
            break
    else:
        print('Invalid choice!')
a1d0245a8cca7577ca292fa843f885bd44bba2c6 | 4,437 | py | Python | gdmtl/datasets/assembler.py | binshengliu/gdmtl | fb8bfe0e87bbd6d8535cc8449012fb4119430d4c | [
"MIT"
] | null | null | null | gdmtl/datasets/assembler.py | binshengliu/gdmtl | fb8bfe0e87bbd6d8535cc8449012fb4119430d4c | [
"MIT"
] | null | null | null | gdmtl/datasets/assembler.py | binshengliu/gdmtl | fb8bfe0e87bbd6d8535cc8449012fb4119430d4c | [
"MIT"
] | 1 | 2022-02-26T00:49:03.000Z | 2022-02-26T00:49:03.000Z | from __future__ import annotations
from itertools import zip_longest
from typing import Dict, Generic, List, Optional, Sequence, TypeVar
import torch
from irtools.pad import pad_batch
from transformers import PreTrainedTokenizer
T = TypeVar("T", str, List[int])
class Assembler(Generic[T]):
"""Assemble pre-tokenized ids into shapes accepted by transformer models. This assembler
should only be used for fine-tuning as it disregards different tokenizers'
assembling logic and enforce a unified assembling approach. Using it for pre-trained
model inference would result in undefined behavior.
Single: [bos] + [prefix] + ids + [eos] + [optional pad]
Paired: [bos] + [prefix] + first_ids + [sep] + second_ids [eos] + [optional pad]
The `bos`, `sep`, `eos`, and `pad` tokens are self-explained and must be single
integer.
The `prefix` is used as task identifier and can be a list of integers. The same
model may be used for ranking, summarization, or other tasks.
"""
def __init__(
self,
*, # Enforcing keyword arguments for clarity
tokenizer: PreTrainedTokenizer,
max_length: Optional[int] = None,
prefix_token_ids: Optional[T] = None,
suffix_token_ids: Optional[T] = None,
pad_to_max_length: bool = True,
add_special_tokens: bool = True,
return_special_tokens_mask: bool = False,
return_token_type_ids: Optional[bool] = True,
):
if pad_to_max_length:
assert isinstance(max_length, int)
self._tokenizer = tokenizer
self._max_length = max_length
self._prefix: Optional[T] = prefix_token_ids
if isinstance(self._prefix, str):
self._prefix = self._prefix.strip()
if not self._prefix.endswith(">"):
self._prefix += " "
self._suffix: Optional[T] = suffix_token_ids
if isinstance(self._suffix, str) and not self._suffix.startswith(" "):
self._suffix = self._suffix.strip()
if not self._suffix.startswith("<"):
self._suffix = " " + self._suffix
self._pad_to_max_length = pad_to_max_length
self._add_special_tokens = add_special_tokens
self._return_special_tokens_mask = return_special_tokens_mask
self._return_token_type_ids = return_token_type_ids
self._pad_values = {
"input_ids": self._tokenizer.pad_token_id,
"attention_mask": 0,
"token_type_ids": self._tokenizer.pad_token_type_id,
"special_tokens_mask": 1,
}
def batch_assemble(
self,
first_ids_seq: Sequence[T],
second_ids_seq: Optional[Sequence[T]] = None,
) -> Dict[str, torch.Tensor]:
if second_ids_seq is None:
if self._prefix is not None:
first_ids_seq = [self._prefix + x for x in first_ids_seq]
if self._suffix is not None:
first_ids_seq = [x + self._suffix for x in first_ids_seq]
second_ids_seq = []
else:
if self._prefix is not None:
first_ids_seq = [self._prefix + x for x in first_ids_seq]
if self._suffix is not None:
second_ids_seq = [x + self._suffix for x in second_ids_seq]
outputs: List[Dict[str, torch.Tensor]] = []
for first_ids, second_ids in zip_longest(first_ids_seq, second_ids_seq):
output = self._tokenizer.encode_plus(
first_ids,
second_ids,
add_special_tokens=self._add_special_tokens,
max_length=self._max_length,
pad_to_max_length=self._pad_to_max_length,
return_tensors="pt",
return_token_type_ids=self._return_token_type_ids,
return_attention_mask=True,
truncation="longest_first",
return_special_tokens_mask=self._return_special_tokens_mask,
)
assert all(v.size(0) == 1 for v in output.values())
outputs.append({k: v[0] for k, v in output.items()})
encoded_output: Dict[str, torch.Tensor] = {}
for key in outputs[0]:
padded = pad_batch(
[one[key] for one in outputs], value=self._pad_values[key]
)
tensor = torch.stack(padded)
encoded_output[key] = tensor
return encoded_output
| 38.921053 | 92 | 0.626549 |
21b62ee9ac048a021f53d66d14707f3bf6649f00 | 38,120 | py | Python | api/data/constants/essence.py | XIIIsiren/CommunityAPI | e665638d2800b71b3d32d49c6897901f4c49a9c5 | [
"Apache-2.0"
] | 1 | 2021-06-15T07:31:13.000Z | 2021-06-15T07:31:13.000Z | api/data/constants/essence.py | XIIIsiren/CommunityAPI | e665638d2800b71b3d32d49c6897901f4c49a9c5 | [
"Apache-2.0"
] | 1 | 2021-06-01T10:14:32.000Z | 2021-06-02T10:54:12.000Z | api/data/constants/essence.py | XIIIsiren/CommunityAPI | e665638d2800b71b3d32d49c6897901f4c49a9c5 | [
"Apache-2.0"
] | 2 | 2021-06-01T10:59:15.000Z | 2021-06-03T18:29:36.000Z | ESSENCE_DICT = {
"SOUL_WHIP": {
"type": "Wither",
"dungeonize": 300,
"1": 200,
"2": 300,
"3": 400,
"4": 550,
"5": 750
},
"JERRY_STAFF": {
"type": "Wither",
"dungeonize": 0,
"1": 5,
"2": 10,
"3": 25,
"4": 50,
"5": 100
},
"CRYPT_WITHERLORD_SWORD": {
"type": "Wither",
"1": 15,
"2": 25,
"3": 40,
"4": 60,
"5": 100
},
"BONZO_STAFF": {
"type": "Wither",
"1": 5,
"2": 10,
"3": 25,
"4": 50,
"5": 100
},
"BONZO_MASK": {
"type": "Wither",
"1": 5,
"2": 10,
"3": 25,
"4": 50,
"5": 100
},
"STONE_BLADE": {
"type": "Wither",
"1": 15,
"2": 25,
"3": 40,
"4": 60,
"5": 100
},
"ADAPTIVE_HELMET": {
"type": "Wither",
"1": 10,
"2": 25,
"3": 50,
"4": 100,
"5": 150
},
"ADAPTIVE_CHESTPLATE": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"ADAPTIVE_LEGGINGS": {
"type": "Wither",
"1": 10,
"2": 25,
"3": 50,
"4": 100,
"5": 200
},
"ADAPTIVE_BOOTS": {
"type": "Wither",
"1": 10,
"2": 25,
"3": 50,
"4": 100,
"5": 150
},
"SILENT_DEATH": {
"type": "Wither",
"1": 5,
"2": 10,
"3": 25,
"4": 50,
"5": 100
},
"CONJURING_SWORD": {
"type": "Wither",
"1": 5,
"2": 10,
"3": 25,
"4": 50,
"5": 100
},
"SPIRIT_SWORD": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"ITEM_SPIRIT_BOW": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"THORNS_BOOTS": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"SPIRIT_MASK": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"BONE_BOOMERANG": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"BAT_WAND": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"LAST_BREATH": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"SHADOW_ASSASSIN_HELMET": {
"type": "Wither",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"SHADOW_ASSASSIN_CHESTPLATE": {
"type": "Wither",
"1": 35,
"2": 75,
"3": 150,
"4": 300,
"5": 500
},
"SHADOW_ASSASSIN_LEGGINGS": {
"type": "Wither",
"1": 30,
"2": 60,
"3": 120,
"4": 250,
"5": 400
},
"SHADOW_ASSASSIN_BOOTS": {
"type": "Wither",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
},
"SHADOW_FURY": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 400,
"5": 800
},
"LIVID_DAGGER": {
"type": "Wither",
"1": 30,
"2": 60,
"3": 120,
"4": 250,
"5": 400
},
"FLOWER_OF_TRUTH": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 400,
"5": 800
},
"FEL_SWORD": {
"type": "Wither",
"1": 15,
"2": 25,
"3": 40,
"4": 60,
"5": 100
},
"WITHER_CLOAK": {
"type": "Wither",
"1": 75,
"2": 150,
"3": 300,
"4": 600,
"5": 1200
},
"PRECURSOR_EYE": {
"type": "Wither",
"1": 75,
"2": 150,
"3": 300,
"4": 600,
"5": 1200
},
"GIANTS_SWORD": {
"type": "Wither",
"1": 75,
"2": 150,
"3": 300,
"4": 600,
"5": 1200
},
"NECROMANCER_LORD_HELMET": {
"type": "Wither",
"1": 30,
"2": 60,
"3": 120,
"4": 250,
"5": 400
},
"NECROMANCER_LORD_CHESTPLATE": {
"type": "Wither",
"1": 70,
"2": 150,
"3": 250,
"4": 450,
"5": 750
},
"NECROMANCER_LORD_LEGGINGS": {
"type": "Wither",
"1": 35,
"2": 75,
"3": 150,
"4": 300,
"5": 500
},
"NECROMANCER_LORD_BOOTS": {
"type": "Wither",
"1": 30,
"2": 60,
"3": 120,
"4": 250,
"5": 400
},
"NECROMANCER_SWORD": {
"type": "Wither",
"1": 70,
"2": 150,
"3": 250,
"4": 450,
"5": 750
},
"NECRON_BLADE": {
"type": "Wither",
"1": 150,
"2": 300,
"3": 500,
"4": 900,
"5": 1500
},
"HYPERION": {
"type": "Wither",
"1": 150,
"2": 300,
"3": 500,
"4": 900,
"5": 1500
},
"VALKYRIE": {
"type": "Wither",
"1": 150,
"2": 300,
"3": 500,
"4": 900,
"5": 1500
},
"SCYLLA": {
"type": "Wither",
"1": 150,
"2": 300,
"3": 500,
"4": 900,
"5": 1500
},
"ASTRAEA": {
"type": "Wither",
"1": 150,
"2": 300,
"3": 500,
"4": 900,
"5": 1500
},
"WITHER_HELMET": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"WITHER_CHESTPLATE": {
"type": "Wither",
"1": 100,
"2": 200,
"3": 350,
"4": 600,
"5": 1000
},
"WITHER_LEGGINGS": {
"type": "Wither",
"1": 75,
"2": 150,
"3": 250,
"4": 400,
"5": 700
},
"WITHER_BOOTS": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"TANK_WITHER_HELMET": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"TANK_WITHER_CHESTPLATE": {
"type": "Wither",
"1": 100,
"2": 200,
"3": 350,
"4": 600,
"5": 1000
},
"TANK_WITHER_LEGGINGS": {
"type": "Wither",
"1": 75,
"2": 150,
"3": 250,
"4": 400,
"5": 700
},
"TANK_WITHER_BOOTS": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"SPEED_WITHER_HELMET": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"SPEED_WITHER_CHESTPLATE": {
"type": "Wither",
"1": 100,
"2": 200,
"3": 350,
"4": 600,
"5": 1000
},
"SPEED_WITHER_LEGGINGS": {
"type": "Wither",
"1": 75,
"2": 150,
"3": 250,
"4": 400,
"5": 700
},
"SPEED_WITHER_BOOTS": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"WISE_WITHER_HELMET": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"WISE_WITHER_CHESTPLATE": {
"type": "Wither",
"1": 100,
"2": 200,
"3": 350,
"4": 600,
"5": 1000
},
"WISE_WITHER_LEGGINGS": {
"type": "Wither",
"1": 75,
"2": 150,
"3": 250,
"4": 400,
"5": 700
},
"WISE_WITHER_BOOTS": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"POWER_WITHER_HELMET": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"POWER_WITHER_CHESTPLATE": {
"type": "Wither",
"1": 100,
"2": 200,
"3": 350,
"4": 600,
"5": 1000
},
"POWER_WITHER_LEGGINGS": {
"type": "Wither",
"1": 75,
"2": 150,
"3": 250,
"4": 400,
"5": 700
},
"POWER_WITHER_BOOTS": {
"type": "Wither",
"1": 50,
"2": 100,
"3": 200,
"4": 350,
"5": 500
},
"RUNAANS_BOW": {
"type": "Spider",
"dungeonize": 500,
"1": 50,
"2": 75,
"3": 100,
"4": 125,
"5": 150
},
"LEAPING_SWORD": {
"type": "Spider",
"dungeonize": 20,
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"SILK_EDGE_SWORD": {
"type": "Spider",
"dungeonize": 20,
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"SPIDER_HAT": {
"type": "Spider",
"dungeonize": 20,
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"SPIDER_BOOTS": {
"type": "Spider",
"dungeonize": 20,
"1": 10,
"2": 15,
"3": 20,
"4": 25,
"5": 30
},
"MOSQUITO_BOW": {
"type": "Spider",
"dungeonize": 30,
"1": 15,
"2": 25,
"3": 35,
"4": 45,
"5": 65
},
"TARANTULA_HELMET": {
"type": "Spider",
"dungeonize": 30,
"1": 12,
"2": 25,
"3": 35,
"4": 45,
"5": 65
},
"TARANTULA_CHESTPLATE": {
"type": "Spider",
"dungeonize": 30,
"1": 12,
"2": 25,
"3": 35,
"4": 45,
"5": 65
},
"TARANTULA_LEGGINGS": {
"type": "Spider",
"dungeonize": 20,
"1": 10,
"2": 15,
"3": 20,
"4": 25,
"5": 30
},
"TARANTULA_BOOTS": {
"type": "Spider",
"dungeonize": 20,
"1": 10,
"2": 15,
"3": 20,
"4": 25,
"5": 30
},
"SCORPION_BOW": {
"type": "Spider",
"dungeonize": 20,
"1": 10,
"2": 15,
"3": 20,
"4": 25,
"5": 30
},
"PHANTOM_ROD": {
"type": "Undead",
"dungeonize": 500,
"1": 200,
"2": 300,
"3": 400,
"4": 500,
"5": 600
},
"WEREWOLF_HELMET": {
"type": "Undead",
"dungeonize": 500,
"1": 200,
"2": 300,
"3": 400,
"4": 500,
"5": 600
},
"WEREWOLF_CHESTPLATE": {
"type": "Undead",
"dungeonize": 1000,
"1": 400,
"2": 600,
"3": 800,
"4": 1000,
"5": 1200
},
"WEREWOLF_LEGGINGS": {
"type": "Undead",
"dungeonize": 750,
"1": 300,
"2": 450,
"3": 600,
"4": 750,
"5": 900
},
"WEREWOLF_BOOTS": {
"type": "Undead",
"dungeonize": 500,
"1": 200,
"2": 300,
"3": 400,
"4": 500,
"5": 600
},
"PIGMAN_SWORD": {
"type": "Undead",
"dungeonize": 200,
"1": 100,
"2": 200,
"3": 300,
"4": 400,
"5": 500
},
"UNDEAD_SWORD": {
"type": "Undead",
"dungeonize": 10,
"1": 20,
"2": 30,
"3": 40,
"4": 50,
"5": 60
},
"ZOMBIE_SWORD": {
"type": "Undead",
"dungeonize": 100,
"1": 30,
"2": 45,
"3": 60,
"4": 75,
"5": 90
},
"ORNATE_ZOMBIE_SWORD": {
"type": "Undead",
"dungeonize": 100,
"1": 30,
"2": 45,
"3": 60,
"4": 75,
"5": 90
},
"FLORID_ZOMBIE_SWORD": {
"type": "Undead",
"dungeonize": 100,
"1": 30,
"2": 45,
"3": 60,
"4": 75,
"5": 90
},
"SKELETON_HAT": {
"type": "Undead",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 40,
"4": 50,
"5": 60
},
"ZOMBIE_HAT": {
"type": "Undead",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 40,
"4": 50,
"5": 60
},
"ZOMBIE_HEART": {
"type": "Undead",
"dungeonize": 100,
"1": 30,
"2": 45,
"3": 60,
"4": 75,
"5": 90
},
"ZOMBIE_CHESTPLATE": {
"type": "Undead",
"dungeonize": 200,
"1": 40,
"2": 60,
"3": 80,
"4": 100,
"5": 120
},
"ZOMBIE_LEGGINGS": {
"type": "Undead",
"dungeonize": 150,
"1": 40,
"2": 60,
"3": 80,
"4": 100,
"5": 120
},
"ZOMBIE_BOOTS": {
"type": "Undead",
"dungeonize": 120,
"1": 35,
"2": 50,
"3": 65,
"4": 80,
"5": 100
},
"SKELETON_HELMET": {
"type": "Undead",
"dungeonize": 100,
"1": 30,
"2": 45,
"3": 60,
"4": 75,
"5": 90
},
"MACHINE_GUN_BOW": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"CRYPT_BOW": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"CRYPT_DREADLORD_SWORD": {
"type": "Undead",
"1": 10,
"2": 15,
"3": 20,
"4": 25,
"5": 30
},
"CRYPT_WITHERLORD_HELMET": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"CRYPT_WITHERLORD_CHESTPLATE": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"CRYPT_WITHERLORD_LEGGINGS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"CRYPT_WITHERLORD_BOOTS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"SKELETON_GRUNT_HELMET": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"SKELETON_GRUNT_CHESTPLATE": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"SKELETON_GRUNT_LEGGINGS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"SKELETON_GRUNT_BOOTS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"SNIPER_BOW": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SNIPER_HELMET": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"ROTTEN_HELMET": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"ROTTEN_CHESTPLATE": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"ROTTEN_LEGGINGS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"ROTTEN_BOOTS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"UNDEAD_BOW": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"STONE_CHESTPLATE": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"MENDER_HELMET": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"DARK_GOGGLES": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"HEAVY_HELMET": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"HEAVY_CHESTPLATE": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"HEAVY_LEGGINGS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"HEAVY_BOOTS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"SUPER_HEAVY_HELMET": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SUPER_HEAVY_CHESTPLATE": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SUPER_HEAVY_LEGGINGS": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SUPER_HEAVY_BOOTS": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"STINGER_BOW": {
"type": "Undead",
"1": 10,
"2": 15,
"3": 20,
"4": 25,
"5": 30
},
"BOUNCY_HELMET": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"BOUNCY_CHESTPLATE": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"BOUNCY_LEGGINGS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"BOUNCY_BOOTS": {
"type": "Undead",
"1": 10,
"2": 20,
"3": 40,
"4": 80,
"5": 160
},
"SKELETON_MASTER_HELMET": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETON_MASTER_CHESTPLATE": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETON_MASTER_LEGGINGS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETON_MASTER_BOOTS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETON_SOLDIER_HELMET": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETON_SOLDIER_CHESTPLATE": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETON_SOLDIER_LEGGINGS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETON_SOLDIER_BOOTS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"ZOMBIE_SOLDIER_HELMET": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"ZOMBIE_SOLDIER_CHESTPLATE": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"ZOMBIE_SOLDIER_LEGGINGS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"ZOMBIE_SOLDIER_BOOTS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"ZOMBIE_KNIGHT_HELMET": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 160,
"5": 320
},
"ZOMBIE_KNIGHT_CHESTPLATE": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 160,
"5": 320
},
"ZOMBIE_KNIGHT_LEGGINGS": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 160,
"5": 320
},
"ZOMBIE_KNIGHT_BOOTS": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 160,
"5": 320
},
"ZOMBIE_KNIGHT_SWORD": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 160,
"5": 320
},
"ZOMBIE_COMMANDER_HELMET": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"ZOMBIE_COMMANDER_CHESTPLATE": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"ZOMBIE_COMMANDER_LEGGINGS": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"ZOMBIE_COMMANDER_BOOTS": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"ZOMBIE_COMMANDER_WHIP": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"ZOMBIE_LORD_HELMET": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"ZOMBIE_LORD_CHESTPLATE": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"ZOMBIE_LORD_LEGGINGS": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"ZOMBIE_LORD_BOOTS": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SKELETON_LORD_HELMET": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SKELETON_LORD_CHESTPLATE": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SKELETON_LORD_LEGGINGS": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SKELETON_LORD_BOOTS": {
"type": "Undead",
"1": 25,
"2": 50,
"3": 100,
"4": 200,
"5": 400
},
"SKELETOR_HELMET": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETOR_CHESTPLATE": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETOR_LEGGINGS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"SKELETOR_BOOTS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"ZOMBIE_SOLDIER_CUTLASS": {
"type": "Undead",
"1": 15,
"2": 30,
"3": 60,
"4": 120,
"5": 240
},
"METAL_CHESTPLATE": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 150,
"5": 300
},
"MENDER_FEDORA": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 150,
"5": 300
},
"SHADOW_GOGGLES": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 150,
"5": 300
},
"SUPER_UNDEAD_BOW": {
"type": "Undead",
"1": 20,
"2": 40,
"3": 80,
"4": 150,
"5": 300
},
"EARTH_SHARD": {
"type": "Undead",
"1": 50,
"2": 100,
"3": 200,
"4": 400,
"5": 600
},
"STEEL_CHESTPLATE": {
"type": "Undead",
"1": 40,
"2": 80,
"3": 150,
"4": 300,
"5": 600
},
"MENDER_CROWN": {
"type": "Undead",
"1": 40,
"2": 80,
"3": 150,
"4": 300,
"5": 600
},
"WITHER_GOGGLES": {
"type": "Undead",
"1": 40,
"2": 80,
"3": 150,
"4": 300,
"5": 600
},
"DEATH_BOW": {
"type": "Undead",
"1": 40,
"2": 80,
"3": 150,
"4": 300,
"5": 600
},
"CRYSTALLIZED_HEART": {
"type": "Undead",
"dungeonize": 100,
"1": 30,
"2": 45,
"3": 60,
"4": 75,
"5": 90
},
"REVIVED_HEART": {
"type": "Undead",
"dungeonize": 100,
"1": 30,
"2": 45,
"3": 60,
"4": 75,
"5": 90
},
"REAPER_MASK": {
"type": "Undead",
"dungeonize": 500,
"1": 200,
"2": 400,
"3": 600,
"4": 850,
"5": 1200
},
"REAPER_SCYTHE": {
"type": "Undead",
"dungeonize": 200,
"1": 300,
"2": 450,
"3": 600,
"4": 700,
"5": 900
},
"REVENANT_SWORD": {
"type": "Undead",
"dungeonize": 500,
"1": 200,
"2": 400,
"3": 600,
"4": 800,
"5": 1000
},
"REAPER_SWORD": {
"type": "Undead",
"dungeonize": 500,
"1": 200,
"2": 400,
"3": 600,
"4": 800,
"5": 1000
},
"AXE_OF_THE_SHREDDED": {
"type": "Undead",
"dungeonize": 500,
"1": 200,
"2": 400,
"3": 600,
"4": 800,
"5": 1000
},
"ASPECT_OF_THE_DRAGON": {
"type": "Dragon",
"dungeonize": 150,
"1": 150,
"2": 300,
"3": 450,
"4": 600,
"5": 750
},
"YOUNG_DRAGON_HELMET": {
"type": "Dragon",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 50,
"4": 100,
"5": 150
},
"YOUNG_DRAGON_CHESTPLATE": {
"type": "Dragon",
"dungeonize": 70,
"1": 30,
"2": 50,
"3": 80,
"4": 120,
"5": 180
},
"YOUNG_DRAGON_LEGGINGS": {
"type": "Dragon",
"dungeonize": 60,
"1": 25,
"2": 40,
"3": 65,
"4": 110,
"5": 160
},
"YOUNG_DRAGON_BOOTS": {
"type": "Dragon",
"dungeonize": 35,
"1": 20,
"2": 30,
"3": 40,
"4": 90,
"5": 140
},
"OLD_DRAGON_HELMET": {
"type": "Dragon",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 50,
"4": 100,
"5": 150
},
"OLD_DRAGON_CHESTPLATE": {
"type": "Dragon",
"dungeonize": 70,
"1": 30,
"2": 50,
"3": 80,
"4": 120,
"5": 180
},
"OLD_DRAGON_LEGGINGS": {
"type": "Dragon",
"dungeonize": 60,
"1": 25,
"2": 40,
"3": 65,
"4": 110,
"5": 160
},
"OLD_DRAGON_BOOTS": {
"type": "Dragon",
"dungeonize": 35,
"1": 20,
"2": 30,
"3": 40,
"4": 90,
"5": 140
},
"STRONG_DRAGON_HELMET": {
"type": "Dragon",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 50,
"4": 100,
"5": 150
},
"STRONG_DRAGON_CHESTPLATE": {
"type": "Dragon",
"dungeonize": 70,
"1": 30,
"2": 50,
"3": 80,
"4": 120,
"5": 180
},
"STRONG_DRAGON_LEGGINGS": {
"type": "Dragon",
"dungeonize": 60,
"1": 25,
"2": 40,
"3": 65,
"4": 110,
"5": 160
},
"STRONG_DRAGON_BOOTS": {
"type": "Dragon",
"dungeonize": 35,
"1": 20,
"2": 30,
"3": 40,
"4": 90,
"5": 140
},
"PROTECTOR_DRAGON_HELMET": {
"type": "Dragon",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 50,
"4": 100,
"5": 150
},
"PROTECTOR_DRAGON_CHESTPLATE": {
"type": "Dragon",
"dungeonize": 70,
"1": 30,
"2": 50,
"3": 80,
"4": 120,
"5": 180
},
"PROTECTOR_DRAGON_LEGGINGS": {
"type": "Dragon",
"dungeonize": 60,
"1": 25,
"2": 40,
"3": 65,
"4": 110,
"5": 160
},
"PROTECTOR_DRAGON_BOOTS": {
"type": "Dragon",
"dungeonize": 35,
"1": 20,
"2": 30,
"3": 40,
"4": 90,
"5": 140
},
"WISE_DRAGON_HELMET": {
"type": "Dragon",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 50,
"4": 100,
"5": 150
},
"WISE_DRAGON_CHESTPLATE": {
"type": "Dragon",
"dungeonize": 70,
"1": 30,
"2": 50,
"3": 80,
"4": 120,
"5": 180
},
"WISE_DRAGON_LEGGINGS": {
"type": "Dragon",
"dungeonize": 60,
"1": 25,
"2": 40,
"3": 65,
"4": 110,
"5": 160
},
"WISE_DRAGON_BOOTS": {
"type": "Dragon",
"dungeonize": 35,
"1": 20,
"2": 30,
"3": 40,
"4": 90,
"5": 140
},
"UNSTABLE_DRAGON_HELMET": {
"type": "Dragon",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 50,
"4": 100,
"5": 150
},
"UNSTABLE_DRAGON_CHESTPLATE": {
"type": "Dragon",
"dungeonize": 70,
"1": 30,
"2": 50,
"3": 80,
"4": 120,
"5": 180
},
"UNSTABLE_DRAGON_LEGGINGS": {
"type": "Dragon",
"dungeonize": 60,
"1": 25,
"2": 40,
"3": 65,
"4": 110,
"5": 160
},
"UNSTABLE_DRAGON_BOOTS": {
"type": "Dragon",
"dungeonize": 35,
"1": 20,
"2": 30,
"3": 40,
"4": 90,
"5": 140
},
"SUPERIOR_DRAGON_HELMET": {
"type": "Dragon",
"dungeonize": 70,
"1": 40,
"2": 60,
"3": 100,
"4": 200,
"5": 300
},
"SUPERIOR_DRAGON_CHESTPLATE": {
"type": "Dragon",
"dungeonize": 100,
"1": 60,
"2": 100,
"3": 160,
"4": 240,
"5": 360
},
"SUPERIOR_DRAGON_LEGGINGS": {
"type": "Dragon",
"dungeonize": 85,
"1": 50,
"2": 80,
"3": 130,
"4": 220,
"5": 320
},
"SUPERIOR_DRAGON_BOOTS": {
"type": "Dragon",
"dungeonize": 60,
"1": 40,
"2": 60,
"3": 80,
"4": 180,
"5": 280
},
"HOLY_DRAGON_HELMET": {
"type": "Dragon",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 50,
"4": 100,
"5": 150
},
"HOLY_DRAGON_CHESTPLATE": {
"type": "Dragon",
"dungeonize": 70,
"1": 30,
"2": 50,
"3": 80,
"4": 120,
"5": 180
},
"HOLY_DRAGON_LEGGINGS": {
"type": "Dragon",
"dungeonize": 60,
"1": 25,
"2": 40,
"3": 65,
"4": 110,
"5": 160
},
"HOLY_DRAGON_BOOTS": {
"type": "Dragon",
"dungeonize": 35,
"1": 20,
"2": 30,
"3": 40,
"4": 90,
"5": 140
},
"TERMINATOR": {
"type": "Dragon",
"dungeonize": 300,
"1": 100,
"2": 200,
"3": 300,
"4": 500,
"5": 750
},
"SINSEEKER_SCYTHE": {
"type": "Dragon",
"dungeonize": 250,
"1": 70,
"2": 140,
"3": 210,
"4": 350,
"5": 600
},
"JUJU_SHORTBOW": {
"type": "Dragon",
"dungeonize": 150,
"1": 30,
"2": 60,
"3": 120,
"4": 200,
"5": 300
},
"MIDAS_STAFF": {
"type": "Gold",
"dungeonize": 100,
"1": 50,
"2": 100,
"3": 150,
"4": 200,
"5": 250
},
"ROGUE_SWORD": {
"type": "Gold",
"dungeonize": 5,
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"MIDAS_SWORD": {
"type": "Gold",
"dungeonize": 100,
"1": 25,
"2": 50,
"3": 75,
"4": 125,
"5": 200
},
"SUPER_CLEAVER": {
"type": "Gold",
"1": 1,
"2": 1,
"3": 2,
"4": 2,
"5": 3
},
"HYPER_CLEAVER": {
"type": "Gold",
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"GIANT_CLEAVER": {
"type": "Gold",
"1": 10,
"2": 20,
"3": 30,
"4": 40,
"5": 50
},
"GOLD_BONZO_HEAD": {
"type": "Gold",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"GOLD_SCARF_HEAD": {
"type": "Gold",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"GOLD_PROFESSOR_HEAD": {
"type": "Gold",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"GOLD_THORN_HEAD": {
"type": "Gold",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"GOLD_LIVID_HEAD": {
"type": "Gold",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"GOLD_SADAN_HEAD": {
"type": "Gold",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"GOLD_NECRON_HEAD": {
"type": "Gold",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"HARDENED_DIAMOND_HELMET": {
"type": "Diamond",
"dungeonize": 10,
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"HARDENED_DIAMOND_CHESTPLATE": {
"type": "Diamond",
"dungeonize": 10,
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"HARDENED_DIAMOND_LEGGINGS": {
"type": "Diamond",
"dungeonize": 10,
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"HARDENED_DIAMOND_BOOTS": {
"type": "Diamond",
"dungeonize": 10,
"1": 5,
"2": 10,
"3": 15,
"4": 20,
"5": 25
},
"PERFECT_HELMET_1": {
"type": "Diamond",
"dungeonize": 40,
"1": 20,
"2": 30,
"3": 50,
"4": 100,
"5": 150
},
"PERFECT_CHESTPLATE_1": {
"type": "Diamond",
"dungeonize": 70,
"1": 30,
"2": 50,
"3": 80,
"4": 120,
"5": 180
},
"PERFECT_LEGGINGS_1": {
"type": "Diamond",
"dungeonize": 60,
"1": 25,
"2": 40,
"3": 65,
"4": 110,
"5": 160
},
"PERFECT_BOOTS_1": {
"type": "Diamond",
"dungeonize": 35,
"1": 20,
"2": 30,
"3": 40,
"4": 90,
"5": 140
},
"DIAMOND_BONZO_HEAD": {
"type": "Diamond",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"DIAMOND_SCARF_HEAD": {
"type": "Diamond",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"DIAMOND_PROFESSOR_HEAD": {
"type": "Diamond",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"DIAMOND_THORN_HEAD": {
"type": "Diamond",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"DIAMOND_LIVID_HEAD": {
"type": "Diamond",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"DIAMOND_SADAN_HEAD": {
"type": "Diamond",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"DIAMOND_NECRON_HEAD": {
"type": "Diamond",
"1": 50,
"2": 75,
"3": 125,
"4": 250,
"5": 500
},
"YETI_SWORD": {
"type": "Ice",
"dungeonize": 100,
"1": 100,
"2": 150,
"3": 200,
"4": 250,
"5": 300
},
"FROZEN_BLAZE_HELMET": {
"type": "Ice",
"dungeonize": 400,
"1": 100,
"2": 125,
"3": 150,
"4": 175,
"5": 200
},
"FROZEN_BLAZE_CHESTPLATE": {
"type": "Ice",
"dungeonize": 40,
"1": 10,
"2": 15,
"3": 25,
"4": 35,
"5": 60
},
"FROZEN_BLAZE_LEGGINGS": {
"type": "Ice",
"dungeonize": 60,
"1": 15,
"2": 20,
"3": 30,
"4": 40,
"5": 65
},
"FROZEN_BLAZE_BOOTS": {
"type": "Ice",
"dungeonize": 30,
"1": 5,
"2": 10,
"3": 20,
"4": 30,
"5": 55
},
"FROZEN_SCYTHE": {
"type": "Ice",
"dungeonize": 50,
"1": 10,
"2": 15,
"3": 20,
"4": 25,
"5": 30
},
"ICE_SPRAY_WAND": {
"type": "Ice",
"1": 20,
"2": 50,
"3": 100,
"4": 200,
"5": 300
}
} | 18.649706 | 36 | 0.31393 |
fe2c29bc84b8d2fcc529fc29593ac37f259fbed7 | 13 | py | Python | deplytils/__init__.py | joshuahaertel/putils | cd467560b4e97b1caee9ee2111a0748021744b07 | [
"MIT"
] | null | null | null | deplytils/__init__.py | joshuahaertel/putils | cd467560b4e97b1caee9ee2111a0748021744b07 | [
"MIT"
] | null | null | null | deplytils/__init__.py | joshuahaertel/putils | cd467560b4e97b1caee9ee2111a0748021744b07 | [
"MIT"
] | null | null | null | """putils"""
| 6.5 | 12 | 0.461538 |
e23183ddba3187ac893b4f19cac86853f913ef79 | 929 | py | Python | run/wordsub_task3.py | osmanbaskaya/semeval14-task3 | 2d440cadd701de3cbff3a6be630157d3ade05176 | [
"MIT"
] | 1 | 2015-02-03T20:22:33.000Z | 2015-02-03T20:22:33.000Z | run/wordsub_task3.py | osmanbaskaya/semeval14-task3 | 2d440cadd701de3cbff3a6be630157d3ade05176 | [
"MIT"
] | null | null | null | run/wordsub_task3.py | osmanbaskaya/semeval14-task3 | 2d440cadd701de3cbff3a6be630157d3ade05176 | [
"MIT"
] | null | null | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
import sys
import random
from nltk.corpus import stopwords
initial = sys.argv[1] # L or R
n = int(sys.argv[2]) # number of substitutes
s = int(sys.argv[3]) # seed
random.seed(s)
stop = set(stopwords.words('english'))
map(stop.add, "lrb rrb rsb . ... , .. ? ` ' ! lsb lcb rcb -LRB- -RRB- -RSB- -LSB- -LCB- \
-RCB- @card@ n't 's".split())
c = 1
for line in sys.stdin:
tokens = line.split()
i = 0
word = tokens[0]
if word == '</s>':
c += 1
continue
if word not in stop:
while i < n:
total = 0
for j in range(1, len(tokens), 2):
tok, p = tokens[j], 10 ** float(tokens[j+1])
total += p
if total * random.random() <= p:
sub = tok
print "{}{}\t{}".format(initial, c, sub)
i += 1
| 22.658537 | 89 | 0.488698 |
8f7cba65e894c39ecbec2f6660cb8c6f83b646b2 | 4,814 | py | Python | src/main.py | DickKemp/Blockus | e720ce5a50e001c9f0316adc16c30b603ba92c9b | [
"MIT"
] | null | null | null | src/main.py | DickKemp/Blockus | e720ce5a50e001c9f0316adc16c30b603ba92c9b | [
"MIT"
] | 1 | 2022-02-08T03:29:52.000Z | 2022-02-08T03:29:52.000Z | src/main.py | DickKemp/Blockus | e720ce5a50e001c9f0316adc16c30b603ba92c9b | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Set
from blokus.blokus import Blok, OverlapException, UnequalEdgesException
from blokus.canvas import Canvas, CompositeShape, PolygonShape, Style, Box
from blokus.point import Point
import math
import time
def gen_next_level(levelset, b1, is_symmetric=False) -> Set[Blok]:
    """Build the next generation of shapes.

    Attach the piece *b1* to every block in *levelset*, trying every edge
    pairing, and return the set of distinct resulting merged blocks.  A
    candidate is kept only if neither it nor its mirror image has already
    been seen, so a shape and its reflection count once.

    Args:
        levelset: blocks of the current generation.
        b1: the piece to attach to each block.
        is_symmetric: if True, all of b1's edges are indistinguishable
            (a regular polygon), so attaching by edge 0 alone covers
            every distinct placement and the other edges are skipped.

    Returns:
        Set of unique merged blocks forming the next generation.
    """
    unique_blocks = set()
    b1_edges = [0] if is_symmetric else range(Blok.num_edges(b1))
    for b0 in levelset:
        for b0e in range(Blok.num_edges(b0)):
            for b1e in b1_edges:
                try:
                    (b, bm) = Blok.align_blocks_on_edge(b0, b0e, b1, b1e)
                    newb = Blok.merge(b, bm)
                    # De-duplicate against mirror images as well.
                    flipped_newb = Blok.flip(newb, Blok.find_flip_edge(newb))
                    if newb not in unique_blocks and flipped_newb not in unique_blocks:
                        unique_blocks.add(newb)
                except (OverlapException, UnequalEdgesException):
                    # This edge pairing is not a legal placement (pieces
                    # overlap or the edges have different lengths).
                    continue
    return unique_blocks
# Cache of generated levels keyed by (shape-name, level-index) so that
# repeated runn() calls with increasing num_levels reuse earlier work.
# NOTE(review): the key omits is_symmetric -- results could be stale if
# the same name is rerun with a different symmetry flag; confirm callers.
levels_cache = {}
def runn(name, basic, num_levels, svg_file, is_symmetric = False):
    """Grow `num_levels` generations of shapes from the seed polygon
    `basic` (a list of Points), lay every generated shape out on a grid,
    and render the result to `svg_file`.  Prints a summary/timing line.
    """
    start_time = time.time()
    b0 = Blok(basic)
    # The piece attached at each step is a copy of the seed block rotated
    # by pi/4 about the start point of its first edge.
    b1 = Blok.rotate(b0, Blok.get_edge(b0,0).start_pt, math.pi/4)
    prev_level = set()
    prev_level.add(b0)
    # `all` accumulates (generation-number, block) pairs; generation 1 is
    # the seed itself.  (Note: shadows the builtin all() in this scope.)
    all = [(1,b0)]
    for lev in range(num_levels-1):
        cache = levels_cache.get((name,lev), None)
        if cache:
            curr_level = cache
        else:
            curr_level = gen_next_level(prev_level, b1, is_symmetric)
            levels_cache[(name,lev)] = curr_level
        for s in iter(curr_level):
            all.append((lev+2,s))
        prev_level = curr_level
    # Normalize each block (presumably moves it to a canonical placement
    # -- confirm in Blok.normalize) and take one bounding box big enough
    # for all of them so every grid cell is drawn at the same scale.
    normalized_bloks = [(n, Blok.normalize(b)) for n,b in all]
    bigbox = Box.bounding_box_of_boxex([PolygonShape(b.points).bounding_box() for _,b in normalized_bloks])
    # Arrange the shapes on a roughly square d x d grid of canvas cells.
    d = int(math.sqrt(len(all)))
    cv = Canvas(width=20, height=20, nrows=d+2, ncols=d+2)
    i = 0
    for n,b in normalized_bloks:
        c = int(i / d)
        r = int(i % d)
        box = cv.get_box_for_cell(r, c)
        sh = PolygonShape(b.points, style=Style(color='black', size='0.005'))
        # If the block remembers its component pieces, draw those in red
        # beneath the black outline of the merged shape.
        if b.component_blocks:
            component_polygons = [PolygonShape(bp.points, style=Style(color='red')) for bp in b.component_blocks] + [sh]
            sh = CompositeShape(component_polygons)
        cv = Canvas.add_shape3(cv, sh, box, bigbox, margin_percent=0.3, label=f"{str(n)}")
        i = i + 1
    with open(svg_file, 'w') as fd:
        Canvas.render_as_svg(cv, file=fd)
    print(f"{name}: Num shapes: {len(all)} --- Time: {(time.time() - start_time)} seconds ---")
def pentagon(r):
    """Return the five vertices of a regular pentagon inscribed in a
    circle of radius *r* centred on the origin, with the apex at (0, r).

    Vertices are listed counter-clockwise starting from the upper-left.
    """
    # Half of one interior angle of a regular pentagon: (3*pi/5)/2 = 54 deg.
    half_interior = ((5 - 2) * math.pi / 5) / 2
    cos_h = math.cos(half_interior)
    sin_h = math.sin(half_interior)
    lower_x = r * cos_h          # half-width at the bottom pair of vertices
    lower_y = r * sin_h          # depth of the bottom pair below centre
    side = 2 * lower_x           # length of one pentagon side
    upper_x = side * sin_h       # half-width at the upper pair of vertices
    upper_y = r - side * cos_h   # height of the upper pair above centre
    return [
        (-upper_x, upper_y),
        (0.0, r),
        (upper_x, upper_y),
        (lower_x, -lower_y),
        (-lower_x, -lower_y),
    ]
if __name__ == '__main__':
    # Seed polygons (vertex lists) for each shape family.
    SQUARE = [Point(0.0,0.0), Point(1.0,0.0), Point(1.0,1.0), Point(0.0,1.0)]
    TRIANGLE = [Point(0.0, 0.0), Point(0.5,(1/2)*math.sqrt(3)),Point(1.0,0.0)]
    ISOTRIANGLE = [Point(0.0, 0.0), Point(1.0, 1.0), Point(1.0,0.0)]
    PENTAGON = [Point(x,y) for (x,y) in pentagon(1.0)]
    # Regular hexagon on the unit circle; h is the y-offset of the
    # slanted vertices.  (Note: `hex` shadows the builtin here.)
    h = math.sin(math.pi/3.0)
    hex = [ (-0.5,-h), (-1,0), (-0.5,h), (0.5,h), (1,0), (0.5,-h) ]
    HEXAGON = [Point(x,y) for (x,y) in hex]
    # Every seed used below is a regular polygon, so all edges are
    # interchangeable and the symmetric fast path applies.
    is_symmetric = True
    start_time = time.time()
    # Generate and render levels 1..7 per shape; SVG output lands in ./svg/
    # (the directory is assumed to exist -- confirm before running).
    for i in range(1,8):
        loop_time = time.time()
        print(f"\tStart LOOP {i}")
        runn("\t\tTRIANGLE", TRIANGLE, i, f"svg/triangle{i}.svg", is_symmetric)
        runn("\t\tSQUARE", SQUARE, i, f"svg/square{i}.svg", is_symmetric)
        runn("\t\tPENTAGON", PENTAGON, i, f"svg/pentagon{i}.svg", is_symmetric)
        runn("\t\tHEXAGON", HEXAGON, i, f"svg/hexagon{i}.svg", is_symmetric)
        # runn("ISOTRIANGLE", ISOTRIANGLE, i, f"svg/iso_triangle{i}.svg")
        print(f"\tEnd LOOP {i} --- Time: {(time.time() - loop_time)} seconds ---")
        pass
    print(f"TOTAL --- Time: {(time.time() - start_time)} seconds ---")
eab18eb9b090b2f98580b0991568c0cffa072c0a | 2,366 | py | Python | tightai/utils.py | tightai/tightai | 3a440ad780f5c7ff84a54ad9dc6b342ce3b420b6 | [
"Apache-2.0"
] | 1 | 2020-09-13T08:10:59.000Z | 2020-09-13T08:10:59.000Z | tightai/utils.py | tightai/tightai | 3a440ad780f5c7ff84a54ad9dc6b342ce3b420b6 | [
"Apache-2.0"
] | 1 | 2022-02-26T08:32:58.000Z | 2022-02-26T08:32:58.000Z | tightai/utils.py | tightai/tightai | 3a440ad780f5c7ff84a54ad9dc6b342ce3b420b6 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 10_utils.ipynb (unless otherwise specified).
# Public API of this module (file is generated by nbdev from 10_utils.ipynb).
__all__ = ['resolve_path', 'print_dict', 'sort_by_key', 'sec_to_hours', 'to_datetime', 'format_date']
# Cell
import pathlib
# Cell
def resolve_path(path):
    """Return *path* as an absolute ``pathlib.Path`` with symlinks resolved."""
    candidate = pathlib.Path(path)
    return candidate.resolve()
# Cell
def print_dict(item_dict):
    """Pretty-print a possibly nested dict to stdout.

    Nested dicts are recursed into after printing their key, list values
    are printed one tab-indented bullet per element, and everything else
    is printed as ``key: value``.
    """
    for key, value in item_dict.items():
        if isinstance(value, dict):
            print(f"{key}:")
            print_dict(value)
            continue
        if isinstance(value, list):
            print(f"{key}:")
            for entry in value:
                print(f"\t- {entry}")
            continue
        print(f"{key}: {value}")
# Cell
def sort_by_key(items=None, key=None):
    """Sort a list of objects by one of their attributes.

    Args:
        items: list of objects to sort (defaults to an empty list).
        key: attribute name to sort by; prefix with "-" for descending
            order.  If None, *items* is returned unchanged.

    Returns:
        A sorted copy of *items* (best-effort: lists of dicts/sets are
        returned unchanged with a printed warning).

    Raises:
        Exception: if *key* is not an attribute of the first item.

    Fixes over the previous version: ``key=None`` no longer crashes with
    AttributeError (the old None-guard ran after ``key.startswith``), and
    the mutable default argument ``items=[]`` has been removed.
    """
    if items is None:
        items = []
    if key is None:
        return items
    reverse = key.startswith("-")
    if reverse:
        key = key[1:]
    if not items:
        return []
    sample = items[0]
    # dicts/sets have no user attributes to sort on: warn and hand back
    # the list untouched (preserves the original best-effort behavior).
    if isinstance(sample, dict):
        print("This method is for sorting lists of objects and not `dict`.")
        return items
    if isinstance(sample, set):
        print("This method is for sorting lists of objects and not `set`.")
        return items
    try:
        valid_keys = list(set(sample.__dict__.keys()))
    except AttributeError:
        # Objects without a __dict__ (ints, strings, ...) expose no
        # sortable attributes.
        valid_keys = []
    if key not in valid_keys:
        msg = f"{key} is not a valid sorting option."
        if valid_keys:
            valid_str = ", ".join(valid_keys)
            msg = f"{msg} Choices are: {valid_str}"
        else:
            msg = f"{msg} Are you sure that list of items with the Python class `{str(sample.__class__.__name__)}` can be sorted?"
        raise Exception(msg)
    return sorted(items, key=lambda x: getattr(x, key), reverse=reverse)
# Cell
from datetime import datetime, timedelta
def sec_to_hours(seconds):
    """Format a duration in seconds as '<H> hours <M> mins <S> seconds ago.'."""
    hours = int(seconds // 3600)
    mins = int(seconds % 3600) // 60
    secs = int((seconds % 3600) % 60)
    return f"{hours} hours {mins} mins {secs} seconds ago."
def to_datetime(datetime_str):
    """Parse a timestamp of the form '2021-06-03T15:57:54.123456z'.

    Args:
        datetime_str: the string to parse (non-strings are tolerated and
            simply reported as invalid).

    Returns:
        (parsed, True) on success, where parsed is a ``datetime``;
        (datetime_str, False) unchanged on failure.

    The previous bare ``except`` has been narrowed to the exceptions
    ``strptime`` can actually raise (ValueError for bad formats,
    TypeError for non-string input).
    """
    try:
        return datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%S.%fz"), True
    except (ValueError, TypeError):
        return datetime_str, False
def format_date(datetime_obj):
    """Render a datetime/date as 'DD Mon YYYY', e.g. '03 Jun 2021'."""
    day_month_year = '%d %b %Y'
    return datetime_obj.strftime(day_month_year)
e18594963a3614f54e2a9e22384bd35c698602e6 | 8,741 | py | Python | expandseq/__main__.py | jrowellfx/expandSeq | c5f5677d684fa8ba7a4e10b1346aaaa9a32e0cdc | [
"BSD-3-Clause"
] | null | null | null | expandseq/__main__.py | jrowellfx/expandSeq | c5f5677d684fa8ba7a4e10b1346aaaa9a32e0cdc | [
"BSD-3-Clause"
] | null | null | null | expandseq/__main__.py | jrowellfx/expandSeq | c5f5677d684fa8ba7a4e10b1346aaaa9a32e0cdc | [
"BSD-3-Clause"
] | null | null | null | # 3-Clause BSD License
#
# Copyright (c) 2008-2021, James Philip Rowell,
# Alpha Eleven Incorporated
# www.alpha-eleven.com
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. Neither the name of the copyright holder, "Alpha Eleven, Inc.",
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# expandseq/condenseseq - two command line utilities that expose the basic
# functionality of the python-module "seqLister.py" functions "expandSeq()"
# and "condenseSeq()". These functions translate back and forth between a
# condensed form for listing sequences of integers and plain lists of integers.
# Lists of integers in this condensed format are commonly used by various
# computer programs dealing with sequences of images such as render farm
# management tools (like smedge), image sequence viewers (like rv) or "ls"
# commands (like lsseq) to list frames from CG-animation or video footage
# which has been saved as a sequence of individually numbered frames.
#
# The "expandseq" and "condenseseq" commands enhance the simple behavior of
# the "expandSeq()" and "condenseSeq()" python functions by adding the ability
# to print out the lists in various forms. eg.; comma, space or newline
# separators as well as sorting the lists, reversing order, and mixing and
# matching expanded and condensed formats as arguments on the command line.
import argparse
import os
import sys
import subprocess
import textwrap
from operator import itemgetter
import seqLister
# Mode flag: True -> behave as `expandseq`, False -> `condenseseq`.
# main() overwrites this based on the invoked program name (argv[0]).
EXPAND_MODE = True
# Tool version reported by --version.
VERSION = "2.3.0"
def indexNegNumber(argList) :
    """Return the index of the first entry that looks like a negative
    number (a '-' immediately followed by a digit), or -1 if none does.
    """
    for position, arg in enumerate(argList):
        if len(arg) >= 2 and arg[0] == '-' and arg[1].isdigit():
            return position
    return -1
def main():
# Redefine the exception handling routine so that it does NOT
# do a trace dump if the user types ^C while expandseq or
# condenseseq are running.
#
old_excepthook = sys.excepthook
def new_hook(exceptionType, value, traceback):
if exceptionType != KeyboardInterrupt and exceptionType != IOError:
old_excepthook(exceptionType, value, traceback)
else:
pass
sys.excepthook = new_hook
global EXPAND_MODE
if os.path.basename(sys.argv[0]) == "expandseq" :
EXPAND_MODE = True
elif os.path.basename(sys.argv[0]) == "condenseseq" :
EXPAND_MODE = False
else :
print(os.path.basename(sys.argv[0]), ": must be named either expandseq or condenseseq", sep='', file=sys.stderr)
sys.exit(1)
if EXPAND_MODE :
p = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Expands a list of integers and integer sequences of the form 'A-B' or
'A-BxN' into a list of integers.
A-BxN means list every Nth integer starting at A ending at the highest
integer less than or equal to B. Numbers will only be listed once
each. That is; '2-4 1-6' yeilds the list '2 3 4 1 5 6'.
Helpful hint: To pass negative numbers as an argument enclose them
with quotes but include a leading space.
For example:
" -12" or " -99-86"
Allows you to pass a minus-twelve, or minus-ninety-nine through
eighty-six.
(Also see condenseseq).
'''),
usage="%(prog)s [OPTION]... [INTEGER SEQUENCE]...")
else :
p = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Condenses a list of integers and/or integer sequences of the form
'A-B' or 'A-BxN' into the most minimal sequence format possible to
represent the full list of numbers.
Helpful hint: To pass negative numbers as an argument enclose them
with quotes but include a leading space.
For example:
" -12" or " -99-86"
Allows you to pass a minus-twelve, or minus-ninety-nine through
eighty-six.
(Also see expandseq).
'''),
usage="%(prog)s [OPTION]... [INTEGER SEQUENCE]...")
p.add_argument("--version", action="version", version=VERSION)
p.add_argument("--delimiter", "-d", action="store", type=str,
choices=("comma", "space", "newline"),
dest="seqDelimiter",
metavar="DELIMITER",
default="comma",
help="List successive numbers delimited by a 'comma' (default) or a 'space' or a 'newline'.")
if not EXPAND_MODE : # i.e.; condense
p.add_argument("--onlyOnes", action="store_true",
dest="onlyOnes", default=False,
help="only condense sucessive frames, that is, do not list sequences on 2's, 3's, ... N's")
p.add_argument("--pad", action="store", type=int,
dest="pad", default=1,
metavar="PAD",
help="set the padding of the frame numbers to be <PAD> digits. [default: 1]")
p.add_argument("--reverse", "-r", action="store_true",
dest="reverseList", default=False,
help="reverse the order of the list")
if EXPAND_MODE :
p.add_argument("--sort", "-s", action="store_true",
dest="sortList", default=False,
help="sort the resulting list")
p.add_argument("numSequences", metavar="INTEGER SEQUENCE", nargs="*",
help="is a single integer such as 'A', or a range \
of integers such as 'A-B' (A or B can be negative,\
and A may be greater than B to count backwards), or \
a range on N's such as 'A-BxN' where N is a positive integer.")
# Copy the command line args (except prog name) and convert
# commas into spaces thus making more args.
#
args = p.parse_args()
separateArgs = []
for a in args.numSequences :
for b in a.split(',') :
separateArgs.append(b)
remainingArgs = []
result = seqLister.expandSeq(separateArgs, remainingArgs)
if EXPAND_MODE :
if args.sortList :
result.sort()
# Pad list of integers and turn them into strings before printing.
#
### jpr debug ### formatStr = "{0:0=-" + str(4) + "d}"
formatStr = "{0:0=-" + str(args.pad) + "d}"
paddedFrames = []
for frame in result:
paddedFrames.append(formatStr.format(frame))
result = paddedFrames
else :
if args.onlyOnes :
result = seqLister.condenseSeqOnes(result, args.pad)
else :
result = seqLister.condenseSeq(result, args.pad)
if args.reverseList :
result.reverse()
isFirst = True
for s in result :
if args.seqDelimiter == 'space' :
if not isFirst :
sys.stdout.write(' ')
sys.stdout.write(str(s))
isFirst = False
elif args.seqDelimiter == 'comma' :
if not isFirst :
sys.stdout.write(',')
sys.stdout.write(str(s))
isFirst = False
else : # newline
print(s)
if (args.seqDelimiter == 'comma' or args.seqDelimiter == 'space') and not isFirst :
print()
# Allow direct execution of the module (the installed console scripts
# call main() via the package entry point instead).
if __name__ == '__main__':
    main()
| 39.731818 | 120 | 0.642604 |
2172ebd6a9e6e15fb223e8b00db7841c94c9b137 | 12,631 | py | Python | CIS/command.py | charlesfoster/covid-illumina-snakemake | fa70f169259c8a0037e83343a3b9afcd1c2f8285 | [
"MIT"
] | 1 | 2022-03-08T00:41:20.000Z | 2022-03-08T00:41:20.000Z | CIS/command.py | charlesfoster/covid-illumina-snakemake | fa70f169259c8a0037e83343a3b9afcd1c2f8285 | [
"MIT"
] | null | null | null | CIS/command.py | charlesfoster/covid-illumina-snakemake | fa70f169259c8a0037e83343a3b9afcd1c2f8285 | [
"MIT"
] | 1 | 2021-11-11T04:50:24.000Z | 2021-11-11T04:50:24.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 15:57:54 2021
@author: cfos
"""
from CIS import __version__
import os
import snakemake
import argparse
import sys
import shutil
import psutil
from datetime import date
from psutil import virtual_memory
from psutil._common import bytes2human
# Directory containing this file; used below to locate bundled scripts,
# primer schemes and reference genomes under ./bin.
thisdir = os.path.abspath(os.path.dirname(__file__))
# Date stamp (YYYY-MM-DD) used as the default results sub-directory name.
TODAY = date.today().strftime("%Y-%m-%d")
def bytesto(bytes, to, bsize=1024):
    """Convert a raw byte count into kilo/mega/giga/... units.

    ``to`` selects the unit ('k', 'm', 'g', 't', 'p' or 'e') and ``bsize``
    is the step between units (1024 by default).  Returns a float, e.g.
    ``bytesto(314575262000000, 'm')`` -> ``300002347.946...``
    """
    exponents = {"k": 1, "m": 2, "g": 3, "t": 4, "p": 5, "e": 6}
    value = float(bytes)
    # Divide step-by-step (matches the original rounding behaviour for
    # non-power-of-two bsize values).
    for _ in range(exponents[to]):
        value /= bsize
    return value
def main(sysargs=sys.argv[1:]):
    """CLI entry point: parse arguments, validate inputs, run the pipeline.

    Parameters
    ----------
    sysargs : list of str
        Command-line arguments (defaults to ``sys.argv[1:]``).  Honoured for
        programmatic invocation (previously a stray re-parse of ``sys.argv``
        silently discarded this parameter).

    Returns
    -------
    int
        0 when the pipeline (or DAG export) succeeds, 1 when snakemake
        reports failure.  Exits the process directly on invalid input.
    """
    print(
        """\033[92m
 /^\/^\\ COVID
 _|__| O| Illumina
 \/ /~ \_/ \\ Pipeline
 \____|__________/ \\ Snakemake edition v{}
 \_______ \\
 `\ \ \\
 | | \\
 / / \\
 / / \\\\
 / / \ \\
 / / \ \\
 / / _----_ \ \\
 / / _-~ ~-_ | |
 ( ( _-~ _--_ ~-_ _/ |
 \ ~-____-~ _-~ ~-_ ~-_-~ /
 ~-_ _-~ ~-_ _-~
 ~--______-~ ~-___-~ \033[0m
 """.format(
            __version__
        )
    )
    # Available system memory (MB) used as the default snakemake budget.
    max_mem = round(bytesto(virtual_memory().available, "m"))
    try:
        default_kraken = os.environ["KRAKEN2_DEFAULT_DB"]
    except KeyError:  # narrowed from a bare except: only a missing var is expected
        default_kraken = None
    parser = argparse.ArgumentParser(
        description="covid-illumina-snakemake: a pipeline for analysis SARS-CoV-2 samples",
        usage="""CIS [options] <query_directory> """,
    )
    parser.add_argument(
        "query_directory", nargs="*", help="Path to directory with reads to process."
    )
    parser.add_argument(
        "-c",
        "--consensus_freq",
        action="store",
        required=False,
        help="Variant allele frequency threshold for a variant to be incorporated into consensus genome. Default: {}".format(
            float(0.90)
        ),
        metavar="<int>",
    )
    parser.add_argument(
        "-i",
        "--isolates",
        action="store",
        required=False,
        help="List of isolates to assemble (Default: all isolates in query_directory)",
    )
    parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        default=False,
        required=False,
        help="Force overwriting of completed files (Default: files not overwritten)",
    )
    parser.add_argument(
        "-k",
        "--kraken2_db",
        action="store",
        help="kraken2 database. Default: {}".format(default_kraken),
    )
    parser.add_argument(
        "-n", "--dry_run", action="store_true", default=False, help="Dry run only"
    )
    parser.add_argument(
        "-o",
        "--outdir",
        action="store",
        help="Output directory. Default: {}".format(
            os.path.join(os.getcwd(), "results", TODAY)
        ),
    )
    parser.add_argument(
        "-p",
        "--print_dag",
        action="store_true",
        default=False,
        help="Save directed acyclic graph (DAG) of workflow",
    )
    parser.add_argument(
        "-r",
        "--reference",
        action="store",
        required=False,
        default=os.path.join(thisdir, "bin", "references", "NC_045512.fasta"),
        # BUGFIX: help text previously showed a path without the
        # "references" directory, misstating the actual default.
        help="Reference genome to use (Default: {})".format(
            os.path.join(thisdir, "bin", "references", "NC_045512.fasta")
        ),
    )
    parser.add_argument(
        "-s",
        "--scheme",
        action="store",
        required=False,
        default="midnight",
        help="Primer scheme to use: built-in opts are 'midnight', 'swift', 'eden', 'articv4.1', and 'articv3', but if using your own scheme provide the full path to the bed file here (Default: {})".format(
            "midnight"
        ),
    )
    parser.add_argument(
        "-t",
        "--threads",
        action="store",
        help="Number of threads to use",
        default=psutil.cpu_count(logical=True),
        metavar="<int>",
    )
    parser.add_argument(
        "-v",
        "--variant_caller",
        action="store",
        help="Variant caller to use. Choices: 'lofreq' or 'ivar' Default: 'lofreq'",
        default="lofreq",
    )
    parser.add_argument(
        "--keep_reads", action="store_true", help="Keep trimmed reads", default=False
    )
    parser.add_argument(
        "--version",
        action="version",
        version=f"covid illumina pipeline snakemake edition: {__version__}",
    )
    parser.add_argument(
        "--suffix",
        action="store",
        help="Suffix used to identify samples from reads. Default: {}".format(
            "_L001_R1_001.fastq.gz"
        ),
        metavar="<str>",
    )
    parser.add_argument(
        "--max_memory",
        action="store",
        help="Maximum memory (in MB) that you would like to provide to snakemake. Default: {}MB".format(
            max_mem
        ),
        metavar="<int>",
    )
    parser.add_argument("--verbose", action="store_true", help="Print junk to screen.")
    parser.add_argument("--report", action="store_true", help="Generate report.")
    if len(sysargs) < 1:
        parser.print_help()
        sys.exit(-1)
    else:
        args = parser.parse_args(sysargs)
    # BUGFIX: removed a stray `args = parser.parse_args()` here that re-parsed
    # sys.argv and made the `sysargs` parameter dead.
    if not os.path.exists("".join(args.query_directory)):
        print(
            "#####\n\033[91mError\033[0m: Data directory does not appear to exist\n#####\n"
        )
        print("Please check your input and try again\n")
        print("For help, run: CIS --help\n\n")
        sys.exit(-1)
    if default_kraken is None and not args.kraken2_db:
        print(
            "#####\n\033[91mError\033[0m: No kraken2 database supplied, and default database cannot be detected\n#####\n"
        )
        parser.print_help()
        sys.exit(-1)
    elif args.kraken2_db:
        default_kraken = args.kraken2_db
    if args.outdir:
        outdir = args.outdir
    else:
        outdir = os.path.join(os.getcwd(), "results", TODAY)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # Snakemake expects the memory budget as a resources dict.
    if args.max_memory:
        max_mem = {"mem_mb": int(args.max_memory)}
    else:
        max_mem = {"mem_mb": int(max_mem)}
    # Resolve the primer scheme: use a bundled .bed when the name matches one,
    # otherwise treat the argument as a path to a user-supplied bed file.
    if args.scheme and os.path.isfile(
            os.path.join(thisdir, "bin", "primer_schemes", "{}.bed".format(args.scheme))
    ):
        scheme = os.path.join(
            thisdir, "bin", "primer_schemes", "{}.bed".format(args.scheme)
        )
    else:
        scheme = args.scheme
    coverage_script = os.path.join(thisdir, "bin", "scripts", "plot_coverage.R")
    converter_script = os.path.join(
        thisdir, "bin", "scripts", "ivar_variants_to_vcf.py"
    )
    vcf_script = os.path.join(thisdir, "bin", "scripts", "add_fake_genotype.sh")
    suffix = "_L001_R1_001.fastq.gz"
    if args.suffix:
        suffix = args.suffix
    # Check that at least one read file in the query directory matches the suffix.
    check_dir = [
        x for x in os.listdir("".join(args.query_directory)) if x.endswith(suffix)
    ]
    if len(check_dir) == 0:
        print(
            "#####\n\033[91mError\033[0m: Specified suffix does not appear to match the reads in your query_directory\n#####\n"
        )
        print("Please check your input and try again\n")
        print("For help, run: CIS --help\n\n")
        sys.exit(-1)
    if args.isolates:
        with open(args.isolates) as f:
            isolates = [line.rstrip("\n") for line in f]
    else:
        isolates = False
    if args.variant_caller:
        if args.variant_caller not in ["lofreq", "ivar"]:
            print(
                '#####\n\033[91mError\033[0m: Variant caller must be either "lofreq" or "ivar"\n#####\n'
            )
            sys.exit(1)
        variant_caller = args.variant_caller
    else:
        variant_caller = "lofreq"
    # variant_caller = shutil.which(variant_caller)
    # ivar is still necessary for trimming
    # if shutil.which('ivar') is None:
    #     print('#####\n\033[91mError\033[0m: "ivar" is necessary for amplicon trimming but is not in your path\n#####\n')
    #     sys.exit(1)
    # Guard kept for the commented-out shutil.which() path above.
    if variant_caller is None:
        print(
            "#####\n\033[91mError\033[0m: {} could not be found in your path\n#####\n".format(
                variant_caller
            )
        )
        sys.exit(1)
    consensus_freq = 0.9
    if args.consensus_freq:
        consensus_freq = float(args.consensus_freq)
        if consensus_freq > 1 or consensus_freq < 0:
            print(
                "#####\n\033[91mError\033[0m: The consensus_freq option must be a float number between 0 and 1\n#####\n"
            )
            sys.exit(1)
    # Lazily build the samtools/bwa indexes for the reference if missing.
    if not os.path.isfile(args.reference + ".fai"):
        print("Indexing {} with samtools".format(args.reference))
        os.system("samtools faidx {} 2> /dev/null".format(args.reference))
    if not os.path.isfile(args.reference + ".bwt"):
        print("Indexing {} with bwa".format(args.reference))
        os.system("bwa index {} 2> /dev/null".format(args.reference))
    snakefile = os.path.join(thisdir, "bin", "Snakefile")
    # Workflow configuration handed to snakemake.
    # BUGFIX: the dict previously listed "isolates" twice.
    config = {
        "reads_dir": os.path.join(os.getcwd(), args.query_directory[0]),
        "kraken2_db": default_kraken,
        "outdir": outdir,
        "reference": args.reference,
        "isolates": isolates,
        "coverage_script": coverage_script,
        "converter_script": converter_script,
        "vcf_script": vcf_script,
        "variant_program": variant_caller,
        "scheme": scheme,
        "suffix": suffix,
        "threads": args.threads,
        "consensus_freq": consensus_freq,
        "keep_reads": args.keep_reads,
        "verbose": args.verbose,
    }
    if args.print_dag:
        # Render the workflow DAG to dag.pdf instead of running the pipeline.
        flat_config = []
        for key in config:
            flat_config.append(key + "=" + str(config[key]))
        flat_config = " ".join(flat_config)
        cmd = 'snakemake -j1 -s {0} --config {1} --dag | grep -v "No negative control samples detected" | dot -Tpdf > dag.pdf'.format(
            os.path.join(thisdir, "bin", "Snakefile"), flat_config
        )
        os.system(cmd)
        status = True
    elif args.report:
        status = snakemake.snakemake(
            snakefile,
            report=os.path.join(outdir, "pipeline_report.html"),
            use_conda=True,
            conda_frontend="mamba",
            dryrun=args.dry_run,
            printshellcmds=True,
            forceall=args.force,
            force_incomplete=True,
            resources=max_mem,
            config=config,
            quiet=False,
            cores=args.threads,
            lock=False,
        )
    elif config["verbose"]:
        print("\n**** CONFIG ****")
        for k in config:
            print(k + ": ", config[k])
        status = snakemake.snakemake(
            snakefile,
            use_conda=True,
            conda_frontend="mamba",
            dryrun=args.dry_run,
            printshellcmds=True,
            forceall=args.force,
            force_incomplete=True,
            resources=max_mem,
            config=config,
            quiet=False,
            cores=args.threads,
            lock=False,
        )
    else:
        status = snakemake.snakemake(
            snakefile,
            use_conda=True,
            conda_frontend="mamba",
            dryrun=args.dry_run,
            printshellcmds=False,
            forceall=args.force,
            force_incomplete=True,
            resources=max_mem,
            config=config,
            quiet=True,
            cores=args.threads,
            lock=False,
        )
    # BUGFIX: previously both branches returned 0 and a trailing `return 1`
    # was unreachable, so snakemake failures still exited 0 and printed
    # "Pipeline complete!".  Propagate failure to the caller instead.
    if not status:
        return 1
    if args.print_dag:  # translate "success" into shell exit code of 0
        print(
            "\033[92m\nResults\033[0m: {}\n\n\033[92mPipeline complete!\033[0m\n".format(
                os.path.join(os.getcwd(), "dag.pdf")
            )
        )
    else:
        print(
            "\033[92m\nResults\033[0m: {}\n\n\033[92mPipeline complete!\033[0m\n".format(
                config["outdir"]
            )
        )
    return 0
# Allow running this module directly as well as via the console entry point.
if __name__ == "__main__":
    main()
| 31.187654 | 206 | 0.534399 |
69dfc38cc3c167d88b7cc364be4d540176eff86c | 1,735 | py | Python | scripts/create_agent.py | c-keil/SE2-equivariant-grasp-learning | f32230e45c6a7bc1693df9b27d1084d98347477e | [
"MIT"
] | null | null | null | scripts/create_agent.py | c-keil/SE2-equivariant-grasp-learning | f32230e45c6a7bc1693df9b27d1084d98347477e | [
"MIT"
] | null | null | null | scripts/create_agent.py | c-keil/SE2-equivariant-grasp-learning | f32230e45c6a7bc1693df9b27d1084d98347477e | [
"MIT"
] | 1 | 2022-03-14T20:08:18.000Z | 2022-03-14T20:08:18.000Z | from agents.agents_3d.dqn_3d_asr import DQN3DASR
from utils.parameters import *
from networks.equivariant_models_refactor import EquResUReg, EquShiftQ2ResN
def createAgent():
    """
    Build the DQN3DASR agent with equivariant q1/q2 networks.

    All hyper-parameters (rotations, patch size, device, learning rate, ...)
    come from the star-imported utils.parameters module.
    """
    # Upper bound of the z-rotation range: spans pi with half_rotation,
    # otherwise 2*pi, split over num_rotations discrete steps.
    step_count = num_rotations - 1
    if half_rotation:
        rz_max = step_count * np.pi / num_rotations
    else:
        rz_max = step_count * 2 * np.pi / num_rotations
    rz_range = (0, rz_max)
    # Pad the heightmap diagonal up to a multiple of 32 for the U-net domain.
    diag = float(heightmap_size) * np.sqrt(2)
    diag = int(np.ceil(diag / 32) * 32)
    # Never (re-)initialize weights when an existing model will be loaded.
    loading_model = load_sub is not None or load_model_pre is not None
    initialize = False if loading_model else initialize_net
    assert args.use_depth + args.use_rgb > 0
    # Channel bookkeeping: depth contributes 1 channel, RGB contributes 3.
    in_channels = 1 * args.use_depth + 3 * args.use_rgb
    patch_channels = in_channels
    patch_shape = (patch_channels, patch_size, patch_size)
    q2_input_shape = (patch_channels + 1, patch_size, patch_size)
    q1 = EquResUReg(in_channels,
                    num_primitives,
                    domain_shape=(1, diag, diag),
                    patch_shape=patch_shape,
                    N=equi_n,
                    flip=True,
                    initialize=initialize,
                    is_fcn_si=False,
                    last_activation_softmax=True).to(device)
    q2 = EquShiftQ2ResN(q2_input_shape,
                        num_rotations,
                        num_primitives,
                        kernel_size=7,
                        quotient=False,
                        last_quotient=True,
                        initialize=initialize).to(device)
    agent = DQN3DASR(workspace, heightmap_size, device, lr=lr,
                     num_primitives=num_primitives, patch_size=patch_size,
                     num_rz=num_rotations, rz_range=rz_range, network=model)
    agent.initNetwork(q1, q2)
    agent.detach_es = detach_es
    agent.aug = aug
    return agent
| 41.309524 | 109 | 0.692795 |
bdbf53c48b90b3b7aee15ced3764bbf92ac1e2a1 | 8,849 | py | Python | restoredb.py | cecton/restoredb.py | fbd97293112b47ecc39204a7845664c9a0981fd1 | [
"MIT"
] | null | null | null | restoredb.py | cecton/restoredb.py | fbd97293112b47ecc39204a7845664c9a0981fd1 | [
"MIT"
] | null | null | null | restoredb.py | cecton/restoredb.py | fbd97293112b47ecc39204a7845664c9a0981fd1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
#
# restoredb
#
# This is a small example on how to use StreamDecompressor to make a
# generic pg_restore-like command.
#
# TL;DR
#
# ~/$ restoredb -d dbname my_dump.pgdump.xz
# # will restore a pgdump compressed in xz
#
# ~/$ restoredb -d dbname my_dump.sql.bz2
# # will restore an SQL dump compressed in bz2
#
# ~/$ restoredb -d dbname my_dump.tar.gz
# # will restore an tar dump compressed in gzip
#
# ~/$ restoredb -d dbname my_dump.7z
# # will restore any dump if the 7z contains only one file
# # and is a pgdump, SQL dump or tar dump
#
# ~/$ ssh toto@foo.bar "cat remote_dump.zip.xz" | restoredb -d dbname
# # will restore any dump if the remote zip file over-compressed in xz
# # contains only one file and is a pgdump, SQL dump or tar dump
#
import io
import os
import sys
import errno
import argparse
import subprocess
import tarfile
import StreamDecompressor
try:
import pgheader
import time
except ImportError:
pgheader = None
__all__ = """
PostgreSQLDump PostgreSQLTarDump PlainSQL pgdump_guesser open parser run
""".split()
class PostgreSQLDump(StreamDecompressor.ExternalPipe):
    """
    Stream custom dump to pg_restore in order to get SQL

    Recognises a PostgreSQL custom-format dump by its ``PGDMP`` magic and
    pipes it through ``pg_restore`` so downstream consumers see plain SQL.
    ``toc_pos`` is the byte offset of the dump header inside the peeked
    data (non-zero for tar-wrapped dumps, see PostgreSQLTarDump).
    """
    # Class-level mutable list on purpose: run() appends pg_restore options
    # to it before archives are opened, and __init__ rewrites element 0.
    __command__ = ['pg_restore']
    __compression__ = 'pgdmp_custom'
    __mimes__ = [
        'application/octet-stream',
        'binary',
    ]
    __extensions__ = ['dump', 'dmp', 'pgdmp', 'pgdump']
    def __init__(self, name, fileobj, toc_pos=0):
        # Eagerly parse the dump header from a peeked prefix (stream is not
        # consumed) when the optional pgheader module is importable.
        if pgheader:
            self.header = pgheader.ArchiveHandle(
                io.BytesIO(fileobj.peek(16000)[toc_pos:]))
            self.header.ReadHead()
        else:
            self.header = None
        self.__command__[0] = self.find_pg_restore()
        super(PostgreSQLDump, self).__init__(name, fileobj)
    def find_pg_restore(self):
        # Hook point: subclasses may override to locate the binary differently.
        return self.__command__[0]
    @classmethod
    def __guess__(cls, mime, name, fileobj, toc_pos=0):
        realname = super(PostgreSQLDump, cls).__guess__(mime, name, fileobj)
        # NOTE(review): peek(5)[toc_pos:] relies on peek() returning at least
        # toc_pos+5 bytes when toc_pos > 0 (tar case) -- confirm that
        # StreamDecompressor's peek guarantees this.
        magic = fileobj.peek(5)[toc_pos:]
        if not magic[:5] == 'PGDMP':
            raise ValueError("not a postgres custom dump")
        return realname
class PostgreSQLTarDump(PostgreSQLDump):
    """
    Inherit of PostgreSQLDump to add some specific behaviors related to tar.
    Note: this decompressor must run prior to the Untar decompressor

    A tar-format pg_dump stores ``toc.dat`` as its first member, so the dump
    header starts right after the first 512-byte tar block header; hence
    ``toc_pos=tarfile.BLOCKSIZE`` below.
    """
    __compression__ = 'pgdmp_tar'
    __mimes__ = [
        'application/x-tar',
    ]
    __extensions__ = ['tar']
    def __init__(self, name, fileobj):
        super(PostgreSQLTarDump, self)\
            .__init__(name, fileobj, toc_pos=tarfile.BLOCKSIZE)
    @classmethod
    def __guess__(cls, mime, name, fileobj):
        if mime not in cls.__mimes__:
            raise ValueError("not a tar file")
        # The first tar member must be toc.dat (the dump's table of contents).
        tarinfo = tarfile.TarInfo.frombuf(
            fileobj.peek(tarfile.BLOCKSIZE)[:tarfile.BLOCKSIZE])
        if not tarinfo.name == 'toc.dat':
            raise ValueError("does not look like a tar dump")
        return super(PostgreSQLTarDump, cls).__guess__(
            mime, name, fileobj, toc_pos=tarfile.BLOCKSIZE)
class PlainSQL(StreamDecompressor.Archive):
    """
    Terminal archive type asserting the stream is plain SQL text.
    """
    __compression__ = 'sql'
    __mimes__ = ['text/plain']
    __extensions__ = ['dump', 'dmp']
    __uniqueinstance__ = True
    def __init__(self, name, fileobj):
        # Propagate the parsed pg_dump header when upstream was a custom dump.
        header = fileobj.header if isinstance(fileobj, PostgreSQLDump) else None
        self.header = header
        super(PlainSQL, self).__init__(name, fileobj, fileobj)
# Guesser wired with our decompressors.  NOTE(review): the -10 priority
# presumably makes PostgreSQLTarDump run ahead of the generic tar handler
# (as its docstring requires) -- confirm Guesser's priority ordering.
pgdump_guesser = StreamDecompressor.Guesser(
    extra_decompressors=[
        (-10, PostgreSQLTarDump),
        ( 0, PostgreSQLDump),
        ( 0, PlainSQL),
    ],
)
def open(name=None, fileobj=None):
    """Open a dump by path or file object; raise IOError unless the chain
    of decompressions ends in plain SQL."""
    archive = pgdump_guesser.open(name=name, fileobj=fileobj)
    compressions = archive.compressions
    if compressions and compressions[-1] == 'sql':
        return archive
    raise IOError(errno.EPIPE, "Not a PostgreSQL dump")
# Command-line interface.  add_help=False because -h is taken by --host
# (psql/pg_restore convention); an explicit --help flag is handled in run().
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--help', action='store_true')
parser.add_argument('--dbname', '-d', dest='dbname', type=str)
# NOTE(review): this help text describes a port/socket rather than a host
# name -- looks copy-pasted from --port's documentation; confirm wording.
parser.add_argument('--host', '-h',
    help="Specifies the TCP port or the local Unix-domain socket file.")
parser.add_argument('--username', '-U',
    help="Connect to the database as the user username instead "
    "of the default.")
parser.add_argument('--port', '-p', type=int)
parser.add_argument('--no-owner', '-O', action='store_true',
    help="Do not output commands to set ownership of objects to match the "
    "original database.")
parser.add_argument('--no-privileges', '--no-acl', '-x', action='store_true',
    help="Prevent restoration of access privileges (grant/revoke commands).")
parser.add_argument('--clean', '-c', action='store_true',
    help="Clean (drop) database objects before recreating them.")
parser.add_argument('--create', '-C', action='store_true',
    help="Create the database before restoring into it. If --clean is also "
    "specified, drop and recreate the target database before connecting "
    "to it.")
parser.add_argument('--no-header', action='store_true',
    help="Do not print header (when available)")
# Debug output defaults on when the DEBUG environment variable is set.
parser.add_argument('--debug', action='store_true',
    default=bool(os.environ.get('DEBUG', False)))
parser.add_argument('dump', nargs='?')
def warn(*messages):
    """Write *messages*, space-joined, to stderr with a trailing newline."""
    text = " ".join(unicode(m) for m in messages)
    sys.stderr.write(text + "\n")
def die(*messages):
    """Report *messages* on stderr and abort with exit status 1."""
    warn(*messages)
    raise SystemExit(1)
def debug(*messages):
    # Prefix messages with "debug:" and print only when --debug is enabled.
    # NOTE(review): reads the module-level `args` created in the __main__
    # block, not a parameter; calling run() from another module without
    # first setting restoredb.args would raise NameError here.
    if args.debug:
        warn("debug:", *messages)
def run(args):
    """Drive the restore: open the dump, print its header, feed psql.

    ``args`` is the namespace produced by the module-level ``parser``.
    Writes SQL to stdout when no --dbname is given and stdout is not a tty
    (or when dbname is '-'); otherwise pipes the SQL into ``psql``.
    """
    if args.help:
        parser.print_help()
        sys.exit(0)
    # pg_restore options accumulate on the class-level command list and are
    # picked up when PostgreSQLDump spawns its pipe.
    if args.no_owner:
        PostgreSQLDump.__command__.append('--no-owner')
    if args.no_privileges:
        PostgreSQLDump.__command__.append('--no-privileges')
    if args.clean:
        PostgreSQLDump.__command__.append('--clean')
    if args.create:
        PostgreSQLDump.__command__.append('--create')
    # Open from the named file, or from stdin when no dump path was given.
    if args.dump:
        try:
            archive = open(name=args.dump)
        except IOError, exc:
            die(args.dump+':', exc.strerror)
    else:
        try:
            archive = open(fileobj=sys.stdin)
        except IOError, exc:
            die('-:', exc.strerror)
    debug("real name:", archive.realname)
    debug("compressions:", *archive.compressions)
    # Pretty-print the custom-dump header on stderr (pg_restore -l style).
    if archive.header and not args.no_header:
        header = dict(archive.header.__dict__,
            createDate=time.ctime(archive.header.createDate),
            format={
                0 : 'UNKNOWN',
                1 : 'CUSTOM',
                3 : 'TAR',
                4 : 'NULL',
                5 : 'DIRECTORY',
            }[archive.header.format])
        sys.stderr.write(
            ";\n"
            "; Archive created at %(createDate)s\n"
            "; dbname: %(archdbname)s\n"
            "; TOC Entries:\n"
            "; Compression: %(compression)s\n"
            "; Dump Version: %(vmaj)d.%(vmin)d-%(vrev)d\n"
            "; Format: %(format)s\n"
            "; Integer: %(intSize)d bytes\n"
            "; Offset: %(offSize)d bytes\n"
            "; Dumped from database version: %(archiveDumpVersion)s\n"
            "; Dumped by pg_dump version: %(archiveRemoteVersion)s\n"
            ";\n"
            % header
        )
    if 'pgdump' in archive.compressions:
        debug("pg_restore arguments:", PostgreSQLDump.__command__)
    # Decide between dumping SQL to stdout and piping it into psql.
    if (not args.dbname and not os.isatty(sys.stdout.fileno())) or \
            args.dbname == '-':
        try:
            sys.stdout.writelines(archive)
            sys.exit(0)
        except IOError, e:
            archive.close()
            die(e.args[1])
    else:
        # Fall back to the archive's real name as the database name.
        command_args = ['psql', '-X',
            (args.dbname or archive.realname)]
        # psql arguments
        if args.host:
            command_args += ['--host', args.host]
        if args.port:
            command_args += ['--port', str(args.port)]
        if args.username:
            command_args += ['--username', args.username]
        debug("command arguments:", command_args)
        # NOTE: can't use stdin=archive because it doesn't flush line-by-line
        # NOTE(review): io.open(os.devnull) opens devnull in *read* mode;
        # as psql's stdout a write-mode handle seems intended -- confirm.
        psql = subprocess.Popen(command_args,
            stdin=subprocess.PIPE, stdout=io.open(os.devnull))
        try:
            psql.stdin.writelines(archive)
            debug("psql: finished writing lines, closing...")
            psql.stdin.close()
        except IOError, e:
            archive.close()
            die(e.args[1])
        retcode = psql.wait()
        debug("psql exit status:", retcode)
        sys.exit(retcode)
# Script entry point: parse CLI args (this also sets the module-level
# `args` global that debug() reads) and run the restore.
if __name__ == '__main__':
    args = parser.parse_args()
    run(args)
| 31.491103 | 78 | 0.607413 |
da809a59ca07e1ca2608390106867d362000110b | 7,637 | py | Python | lm_pickers/maxcover.py | dulek/alt-tester | d3925e28b2b01ca25706546646b921de90fcd5c8 | [
"MIT"
] | null | null | null | lm_pickers/maxcover.py | dulek/alt-tester | d3925e28b2b01ca25706546646b921de90fcd5c8 | [
"MIT"
] | null | null | null | lm_pickers/maxcover.py | dulek/alt-tester | d3925e28b2b01ca25706546646b921de90fcd5c8 | [
"MIT"
] | null | null | null | from collections import defaultdict
import math
import random
from numpy import random as random_np
from colorama import Fore
from lib import logger
from lib.utils import all_dijkstra_tree, get_lower_bound, get_lm_distances
from lm_picker import LMPicker
# Module-level logger shared by MaxcoverPicker below.
LOG = logger.getLogger()
class MaxcoverPicker(LMPicker):
    """Landmark picker combining the "avoid" and "maxcover" heuristics
    (after Goldberg's ALT landmark-selection work).  Python 2 code."""
    def get_avoid_landmarks(self, lms=None, lm_dists=None, lm_dists_rev=None,
                            lm_num=10):
        """Grow ``lms`` up to ``lm_num`` landmarks with the avoid strategy.

        For each new landmark: pick a random root r, build its shortest-path
        tree, weight each vertex by how loose the current landmark lower
        bound is there, then descend the heaviest subtree to a leaf and make
        that leaf a landmark.  Returns (lms, lm_dists, lm_dists_rev);
        the passed-in containers are mutated in place.
        """
        lms = lms or []
        lm_dists = lm_dists or {}
        lm_dists_rev = lm_dists_rev or {}
        # And now real picking begins...
        for i in range(len(lms), lm_num):
            LOG.info('Calculating landmark ' + Fore.RED + '%d' + Fore.RESET +
                     '...', i)
            r = random.choice(self.G.keys())
            LOG.debug(' Choosen r=%d.', r)
            r_dists, r_tree, r_order = all_dijkstra_tree(r, self.G)
            # First calculate "weights".
            LOG.info(' Calculating weights...')
            weights = {}
            for v in self.G.keys():
                weights[v] = r_dists[v] - get_lower_bound(lm_dists,
                                                          lm_dists_rev, r, v)
            LOG.info(' Calculating sizes...')
            # Then "sizes" dependent on "weights"
            sizes = defaultdict(lambda: 0)
            w = None  # That's the node of max size
            # We're processing the vertices in reversed order of Dijkstra
            # algorithm. Basically we're starting on the leaves and going up.
            for v in reversed(r_order):
                # Traverse subtree of r_tree rooted at v using DFS
                Q = [v]
                while Q:
                    u = Q.pop()
                    # It's possible we already have size of the subtree
                    if u in sizes:
                        if sizes[u] == 0:
                            sizes[v] = 0
                            break
                        else:
                            sizes[v] += sizes[u]
                            continue
                    # If subtree has landmark then size is 0
                    if u in lms:
                        sizes[v] = 0
                        break
                    else:
                        sizes[v] += weights[u]
                        for x in r_tree[u]:
                            Q.append(x)
                if w is None or sizes[w] < sizes[v]:
                    w = v
            LOG.info(' Calculating landmark...')
            # We have all the sizes calculated, and max one (w). Now we travese
            # subtree of r_tree rooted in w. We always choose branch of highest
            # size.
            while r_tree[w]:
                w = max(r_tree[w], key=lambda x: sizes[x])
            # Adding leaf as a new landmark
            LOG.info(' Calculated node ' + Fore.RED + '%d' + Fore.RESET +
                     ' as landmark.', w)
            lms.append(w)
            # Calculate distances for new landmark
            LOG.info(' Calculating distances for this landmark...')
            lm_dists[w] = get_lm_distances(self.G, [w])[w]
            lm_dists_rev[w] = get_lm_distances(self.G_reversed, [w])[w]
        LOG.info(Fore.RED + 'Choosen landmarks: %s' + Fore.RESET, str(lms))
        return lms, lm_dists, lm_dists_rev
    def calculate_cost(self, lms, lm_dists):
        """Score a landmark set: count each edge (v, w) at most once when
        some landmark yields a non-zero slack for it.

        NOTE(review): the edge is counted when
        G[v][w] - d(lm, w) + d(lm, v) != 0, i.e. when the landmark bound is
        NOT tight, and the local search later maximizes this count --
        confirm against the maxcover definition that this is the intended
        direction (covered vs. uncovered edges).
        """
        cost = 0
        # Iterate over all edges.
        for v in self.G.keys():
            for w in self.G[v].keys():
                # Iterate over all landmarks
                for lm in lms:
                    if self.G[v][w] - lm_dists[lm][w] + lm_dists[lm][v]:
                        cost += 1
                        break
        LOG.info('Cost of current solution: %d', cost)
        return cost
    def get_landmarks(self, lm_num=10):
        """Full maxcover pipeline: build a candidate pool C with repeated
        avoid passes, then run a multistart swap-based local search and
        return the best (landmarks, lm_dists, lm_dists_rev) triple."""
        k = lm_num  # For compatibility with description in Goldberg's article.
        C = set()
        C_dists = {}
        C_dists_rev = {}
        # Start with getting k landmarks by avoid, add all as candidates
        lms, lm_dists, lm_dists_rev = self.get_avoid_landmarks(lm_num=k)
        C.update(lms)
        C_dists.update(lm_dists)
        C_dists_rev.update(lm_dists_rev)
        # Repeat until avoid is called 5k times or we have 4k landmarks
        i = 0  # Number of calls to Avoid.
        # TODO: Too much candidates are generated (4 landmarks - 21 candidates)
        # I should probably split that to use avoid one-by-one.
        while len(C) < (4 * k) and i < (5 * k):
            # With probability 1/2 remove each landmark from the solution.
            # BUGFIX: iterate over a snapshot -- removing from the list being
            # iterated skipped every element following a removal.
            for lm in list(lms):
                if random.randint(0, 1):
                    lms.remove(lm)
                    del lm_dists[lm]
                    del lm_dists_rev[lm]
            i += k - len(lms)  # Avoid will be called that many times.
            # Generate new landmarks
            lms, lm_dists, lm_dists_rev = self.get_avoid_landmarks(lms,
                                                                   lm_dists,
                                                                   lm_dists_rev,
                                                                   k)
            # Add new ones to C and repeat everything
            C.update(lms)
            C_dists.update(lm_dists)
            C_dists_rev.update(lm_dists_rev)
        LOG.info('Got %d candidates in C.', len(C))
        # Multistart heuristic with local search - swapping
        solutions = []
        costs = []
        for i in xrange(int(math.log(k + 1, 2))):
            LOG.info('Calculating %d for %d sets', i + 1,
                     int(math.log(k + 1, 2)))
            S = set(random.sample(C, k))  # Get k lms from C at random
            T = C - S  # Rest of candidates
            current_cost = self.calculate_cost(S, C_dists)
            while True:
                profits = {}
                for s in S:
                    for t in T:
                        LOG.info('Trying out swap %d-%d.', s, t)
                        new_S = S.copy()
                        new_S.remove(s)
                        new_S.add(t)
                        new_cost = self.calculate_cost(new_S, C_dists)
                        profit = new_cost - current_cost
                        if profit > 0:
                            LOG.info('Swap %d-%d profitable with %d profit.',
                                     s, t, profit)
                            profits['%d-%d' % (s, t)] = profit
                # If no improvement can be made stop.
                if not profits:
                    break
                # Otherwise choose swap at random with profit weights
                p = profits.values()
                p_sum = float(sum(p))
                p = [x / p_sum for x in p]  # Normalize weigths to sum to 1.0
                swap = random_np.choice(profits.keys(), p=p)
                LOG.info('Swap %s chosen.', swap)
                s, t = swap.split('-')
                s, t = int(s), int(t)
                S.remove(s)
                S.add(t)
                current_cost += profits[swap]
            LOG.info('No improvements could be made - solution found: %s (%d).',
                     str(S), current_cost)
            solutions.append(S)
            costs.append(current_cost)
        solution = solutions[max(xrange(len(costs)), key=costs.__getitem__)]
        LOG.info('Best solution chosen: %s.', str(solution))
        return (list(solution),
                {k: v for k, v in C_dists.items() if k in solution},
                {k: v for k, v in C_dists_rev.items() if k in solution})
| 38.376884 | 80 | 0.47846 |
b5f1deaa87bc7a0aea8c3b5ed3c4fd851cedd627 | 1,554 | py | Python | people/migrations/0001_initial.py | losolio/website | 5b983e9dfaf604212aab87c51d8904ffc29527a3 | [
"MIT"
] | null | null | null | people/migrations/0001_initial.py | losolio/website | 5b983e9dfaf604212aab87c51d8904ffc29527a3 | [
"MIT"
] | null | null | null | people/migrations/0001_initial.py | losolio/website | 5b983e9dfaf604212aab87c51d8904ffc29527a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import wagtail.wagtailcore.blocks
from django.db import migrations, models
import people.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the ContributorPage model,
    # a Wagtail Page subclass with contact/bio fields and an optional
    # headshot image (SET_NULL, so deleting the image keeps the page).
    dependencies = [
        ('wagtailimages', '0006_add_verbose_names'),
        ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
    ]
    operations = [
        migrations.CreateModel(
            name='ContributorPage',
            fields=[
                # page_ptr is the multi-table-inheritance link to wagtailcore.Page.
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('first_name', models.CharField(default='', max_length=255, blank=True)),
                ('last_name', models.CharField(default='', max_length=255, blank=True)),
                ('nickname', models.CharField(default='', max_length=1024, blank=True)),
                ('email', models.EmailField(default='', max_length=254, blank=True)),
                ('twitter_handle', models.CharField(default='', max_length=16, blank=True)),
                ('short_bio', models.TextField(default='', blank=True)),
                ('long_bio', models.TextField(default='', blank=True)),
                ('headshot', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| 40.894737 | 161 | 0.616474 |
c29b5951c1d572fdf7110582de9e146296eb2433 | 8,477 | py | Python | tensorflow/compiler/plugin/poplar/tests/topk_onehot_test.py | DebeshJha/tensorflow-1 | 2b5a225c49d25273532d11c424d37ce394d7579a | [
"Apache-2.0"
] | 2 | 2021-03-08T23:32:06.000Z | 2022-01-13T03:43:49.000Z | tensorflow/compiler/plugin/poplar/tests/topk_onehot_test.py | DebeshJha/tensorflow-1 | 2b5a225c49d25273532d11c424d37ce394d7579a | [
"Apache-2.0"
] | null | null | null | tensorflow/compiler/plugin/poplar/tests/topk_onehot_test.py | DebeshJha/tensorflow-1 | 2b5a225c49d25273532d11c424d37ce394d7579a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import test_utils as tu
from tensorflow.compiler.tests import xla_test
from tensorflow.python.platform import googletest
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
class OneHotTopK(xla_test.XLATestCase):
  """Tests comparing one_hot / top_k / in_top_k results on the IPU device
  against reference results (CPU results for one_hot, numpy for the rest)."""
  def testOneHot(self):
    """Run one_hot on the CPU to get a reference, then on the IPU and compare."""
    def executeModel(inputs, expected):
      # Runs one one_hot configuration; with expected=None it runs on the
      # CPU and returns the result, otherwise it runs on the IPU and
      # asserts closeness to `expected`.
      with self.session() as sess:
        # Decide what the output type should be.
        data_type = inputs["on"].dtype
        # The actual model function which performs the one-hot operation based on the inputs given to executeModel.
        def model(a):
          return array_ops.one_hot(a,
                                   inputs["n_classes"],
                                   dtype=data_type,
                                   on_value=inputs["on"],
                                   off_value=inputs["off"],
                                   axis=inputs["axis"])
        # We run once on the CPU to get the expected result, then on the IPU to compare the two.
        cpuRun = expected is None
        with ops.device('cpu'):
          pa = array_ops.placeholder(np.int32, inputs["shape"], name="a")
        # Check if we should be running on IPU or cpu.
        device = "cpu:0" if cpuRun else "/device:IPU:0"
        with ops.device(device):
          out = model(pa)
        tu.ReportJSON(self, sess)
        in_data = np.array(inputs["in_values"])
        fd = {pa: in_data}
        result = sess.run(out, fd)
        if cpuRun:
          return result
        self.assertAllClose(result, expected)
    # Generate a multi dimensional matrix.
    largish_matrix_size = [4, 3, 4, 2, 2]
    largish_matrix_data = np.random.randint(1, np.prod(largish_matrix_size),
                                            largish_matrix_size)
    # Generate a vector as well, as using just the matrix will increase test times unnecessarily
    # NOTE(review): vector_size/vector_data duplicate the 5-D matrix above
    # rather than being 1-D as the comment suggests -- confirm intent.
    vector_size = [4, 3, 4, 2, 2]
    vector_data = np.random.randint(1, np.prod(largish_matrix_size),
                                    largish_matrix_size)
    inputs = [
        # Test different dimensions.
        {
            "n_classes": 10,
            "shape": [4],
            "in_values": [1, 2, 3, 4],
            "on": np.float32(2.0),
            "off": np.float32(0.0),
            "axis": -1
        },
        {
            "n_classes": 1200,
            "shape": [4, 2],
            "in_values": [[1, 1], [2, 5], [4, 3], [4, 6]],
            "on": np.float32(1.0),
            "off": np.float32(0.0),
            "axis": -1
        },
        {
            "n_classes": 1200,
            "shape": largish_matrix_size,
            "in_values": largish_matrix_data,
            "on": np.float32(1.0),
            "off": np.float32(0.0),
            "axis": -1
        },
        # Test different depths
        {
            "n_classes": 1,
            "shape": [4],
            "in_values": [1, 2, 3, 4],
            "on": np.float32(1.0),
            "off": np.float32(0.0),
            "axis": -1
        },
        {
            "n_classes": 12000,
            "shape": [4, 2],
            "in_values": [[1, 1], [2, 5], [4, 3], [4, 6]],
            "on": np.float32(1.0),
            "off": np.float32(0.0),
            "axis": -1
        },
        # Test different axes.
        {
            "n_classes": 100,
            "shape": vector_size,
            "in_values": vector_data,
            "on": np.float32(1.0),
            "off": np.float32(0.0),
            "axis": 0
        },
        {
            "n_classes": 1200,
            "shape": largish_matrix_size,
            "in_values": largish_matrix_data,
            "on": np.float32(1.0),
            "off": np.float32(0.0),
            "axis": 3
        },
        {
            "n_classes": 100,
            "shape": largish_matrix_size,
            "in_values": largish_matrix_data,
            "on": np.float32(1.0),
            "off": np.float32(0.0),
            "axis": 2
        },
        # Test different on/off.
        {
            "n_classes": 100,
            "shape": vector_size,
            "in_values": vector_data,
            "on": np.float32(0.25),
            "off": np.float32(0.1),
            "axis": 0
        },
        {
            "n_classes": 100,
            "shape": vector_size,
            "in_values": vector_data,
            "on": np.float32(20.0),
            "off": np.float32(-1.0),
            "axis": 0
        },
        # Float16 is the only data type we will run on assembly so we have specific cases for that.
        {
            "n_classes": 100,
            "shape": vector_size,
            "in_values": vector_data,
            "on": np.float16(1.0),
            "off": np.float16(0.0),
            "axis": 0
        },
        {
            "n_classes": 100,
            "shape": vector_size,
            "in_values": vector_data,
            "on": np.float16(2.0),
            "off": np.float16(3.0),
            "axis": 1
        },
        # Check int32 works as well
        {
            "n_classes": 100,
            "shape": vector_size,
            "in_values": vector_data,
            "on": np.int32(4.0),
            "off": np.int32(2.0),
            "axis": 0
        },
        {
            "n_classes": 100,
            "shape": vector_size,
            "in_values": vector_data,
            "on": np.int32(4.0),
            "off": np.int32(2.0),
            "axis": 1
        },
    ]
    for test_case in inputs:
      # Run on CPU first
      result = executeModel(test_case, None)
      # Run on IPU and test against CPU out.
      executeModel(test_case, result)
  def testTopK(self):
    """Check nn.top_k indices on the IPU against numpy argsort, for
    float16, float32 and int32 inputs."""
    def doTestTopK(self, dtype):
      with self.session() as sess:
        n_categories = 1200
        topn = 24
        def model(a):
          _, indices = nn.top_k(a, topn)
          return indices
        with ops.device('cpu'):
          pa = array_ops.placeholder(dtype, [n_categories], name="a")
        with ops.device("/device:IPU:0"):
          out = model(pa)
        report = tu.ReportJSON(self, sess)
        report.reset()
        # Shuffled set of values of specified dtype in [0:n_categories).
        # This ensures there is a single unique sort result.
        pa_input = np.arange(n_categories, dtype=dtype)
        np.random.shuffle(pa_input)
        expected = (-pa_input).argsort()[:topn]
        fd = {pa: pa_input}
        result = sess.run(out, fd)
        self.assertAllClose(result, expected)
        report.parse_log(assert_len=4)
    testTypes = [np.float16, np.float32, np.int32]
    for dtype in testTypes:
      doTestTopK(self, dtype)
  def testInTopK(self):
    """Check nn.in_top_k on the IPU: the per-row argmax computed by numpy
    must always be reported as in the top 8."""
    with self.session() as sess:
      batchsize = 4
      n_categories = 1200
      topn = 8
      def model(a, b):
        return nn.in_top_k(a, b, topn)
      with ops.device('cpu'):
        pa = array_ops.placeholder(np.float32, [batchsize, n_categories])
        pb = array_ops.placeholder(np.int32, [batchsize])
      with ops.device("/device:IPU:0"):
        out = model(pa, pb)
      report = tu.ReportJSON(self, sess)
      report.reset()
      # `input` intentionally shadows the builtin here (kept for byte-compat).
      input = np.random.rand(batchsize, n_categories)
      input = input / np.sqrt(np.sum(input**2))
      # ref holds each row's argmax (top-1 index).
      ref = (-input).argsort(axis=1)[:, :1]
      ref = ref.reshape([batchsize])
      fd = {pa: input, pb: ref}
      result = sess.run(out, fd)
      self.assertAllClose(result, [True, True, True, True])
      report.parse_log(assert_len=4)
if __name__ == "__main__":
  # Force XLA to cluster even single-op graphs so the tests exercise the
  # IPU compilation path; prepends to any flags already in the environment.
  os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=1 ' +
                                os.environ.get('TF_XLA_FLAGS', ''))
  googletest.main()
| 29.848592 | 114 | 0.518226 |
402ad993dfdf629ab07b3402f88defdf78811af5 | 8,801 | py | Python | third_party/gstreamer/scripts/gst-plot-timeline.py | isabella232/aistreams | 209f4385425405676a581a749bb915e257dbc1c1 | [
"Apache-2.0"
] | 6 | 2020-09-22T18:07:15.000Z | 2021-10-21T01:34:04.000Z | third_party/gstreamer/scripts/gst-plot-timeline.py | isabella232/aistreams | 209f4385425405676a581a749bb915e257dbc1c1 | [
"Apache-2.0"
] | 2 | 2020-11-10T13:17:39.000Z | 2022-03-30T11:22:14.000Z | third_party/gstreamer/scripts/gst-plot-timeline.py | isabella232/aistreams | 209f4385425405676a581a749bb915e257dbc1c1 | [
"Apache-2.0"
] | 3 | 2020-09-26T08:40:35.000Z | 2021-10-21T01:33:56.000Z | #!/usr/bin/env python
#
# based on plot-timeline.py by Federico Mena-Quintero <federico at ximian dotcom>
# example:
# GST_DEBUG_COLOR_MODE=off GST_DEBUG="*:3" gst-launch-1.0 2>debug.log audiotestsrc num-buffers=10 ! audioconvert ! alsasink
# gst-plot-timeline.py debug.log --output=debug.png
import math
import optparse
import os
import re
import sys
import cairo
# Rendering configuration for the timeline image.
FONT_NAME = "Bitstream Vera Sans"
FONT_SIZE = 8
# how many pixels for a second on the timeline
PIXELS_PER_SECOND = 300
# how many pixels for one line of log
PIXELS_PER_LINE = 10
# Overall plot width and x-layout of the time-scale / log-text columns.
PLOT_WIDTH = 1400
TIME_SCALE_WIDTH = 20
SYSCALL_MARKER_WIDTH = 20
LOG_TEXT_XPOS = 400
LOG_MARKER_WIDTH = 20
BACKGROUND_COLOR = (0, 0, 0)
# assumes GST_DEBUG_LOG_COLOR=1
# timestamp pid thread level category,file,line,msg
mark_regex = re.compile (r'^(\d+:\d+:\d+\.\d+) +\d+ +0?x?[0-9a-f]+ [A-Z]+ +([-a-zA-Z0-9_]+ )(.*)')
# Capture-group indices within mark_regex.
mark_timestamp_group = 1
mark_program_group = 2
mark_log_group = 3
success_result = "0"
# Defaults for the CLI options; overwritten in main() from parsed arguments.
# The empty skip_regex matches every line, i.e. no skipping by default.
skip_lines = 0
max_lines = 500
filter_regex = re.compile ('')
skip_regex = re.compile('')
class BaseMark:
    """One parsed log event to be laid out and drawn on the timeline."""

    # Default draw colour (black); subclasses override this.
    colors = 0, 0, 0

    def __init__(self, timestamp, log):
        """Record the event time and text; layout positions start at 0."""
        self.log = log
        self.timestamp = timestamp
        # Vertical pixel positions, filled in by compute_syscall_metrics().
        self.timestamp_ypos = 0
        self.log_ypos = 0
class AccessMark(BaseMark):
    # An ordinary log event; its colour is assigned per-program by
    # SyscallParser.add_line().
    pass
class LastMark(BaseMark):
    # Red sentinel for a literal "last" log line; main() stops collecting
    # marks when it sees one.
    colors = 1.0, 0, 0
class FirstMark(BaseMark):
    # Red sentinel for a literal "first" log line; main() discards all
    # marks collected before it.
    colors = 1.0, 0, 0
class ExecMark(BaseMark):
    # Red mark for an execve event; only the executable's basename is shown.
    # colors = 0.75, 0.33, 0.33
    colors = (1.0, 0.0, 0.0)
    def __init__(self, timestamp, log):
        # ``log`` is the execve path; keep just its final path component.
        BaseMark.__init__(self, timestamp,
                          'execve: ' + os.path.basename(log))
class Metrics:
    """Pixel dimensions of the rendered plot."""

    def __init__(self):
        # Both are filled in by compute_syscall_metrics().
        self.height = 0
        self.width = 0
# don't use black or red
# Per-program colour palette (RGB in [0, 1]); a program's colour is picked
# by hashing its name in SyscallParser.add_line().
palette = [
    (0.12, 0.29, 0.49),
    (0.36, 0.51, 0.71),
    (0.75, 0.31, 0.30),
    (0.62, 0.73, 0.38),
    (0.50, 0.40, 0.63),
    (0.29, 0.67, 0.78),
    (0.96, 0.62, 0.34)
    ]
class SyscallParser:
    """Accumulates timeline marks parsed from raw GStreamer debug lines."""
    def __init__ (self):
        self.syscalls = []
    def add_line (self, str):
        # NOTE(review): the parameter name shadows the builtin ``str``.
        m = mark_regex.search (str)
        if m:
            # Convert "HH:MM:SS.frac" into seconds since midnight.
            timestr = m.group (mark_timestamp_group).split(':')
            timestamp = float (timestr[2]) + (float (timestr[1]) * 60.0) + (float (timestr[0]) * 3600.0)
            program = m.group (mark_program_group)
            text = program + m.group (mark_log_group)
            # NOTE(review): ``text`` always begins with the program name, so
            # the two sentinel comparisons below look unreachable -- confirm
            # the intended "first"/"last" marker format.
            if text == 'last':
                self.syscalls.append (LastMark (timestamp, text))
            elif text == 'first':
                self.syscalls.append (FirstMark (timestamp, text))
            else:
                s = AccessMark (timestamp, text)
                # Stable per-program colour chosen by hashing the name.
                program_hash = program.__hash__ ()
                s.colors = palette[program_hash % len (palette)]
                self.syscalls.append (s)
        else:
            print 'No log in %s' % str
            return
def parse_strace(filename):
    """Parse a debug log into a list of mark objects.

    Honours the module-level options set by main(): skip_regex / skip_lines
    choose where collection begins, max_lines caps the number of marks, and
    filter_regex drops non-matching lines.
    """
    parser = SyscallParser ()
    global skip_lines
    global max_lines
    global skip_regex
    skip_found = False
    # NOTE: file() is the Python 2 builtin; readlines() loads the whole log
    # into memory before iterating.
    for line in file(filename, "r").readlines():
        if line == "":
            break
        # The default (empty) skip_regex matches every line, so collection
        # starts immediately unless the user supplied a real expression.
        if not skip_found:
            if skip_regex.search(line):
                skip_found = True
            else:
                continue
        if skip_lines > 0:
            skip_lines -= 1
            continue
        if len(parser.syscalls) >= max_lines:
            break
        if filter_regex.search(line):
            parser.add_line (line)
    return parser.syscalls
def normalize_timestamps(syscalls):
    """Shift every mark in place so the earliest one starts at time 0."""
    origin = syscalls[0].timestamp
    for mark in syscalls:
        mark.timestamp = mark.timestamp - origin
def compute_syscall_metrics(syscalls):
    """Compute the plot size and assign a y position to every mark.

    Mutates the module-level PIXELS_PER_SECOND / PIXELS_PER_LINE scales so
    the time axis and the log-text column end up spanning the same height.
    NOTE(review): the scale adjustments rely on Python 2 integer division.
    """
    global PIXELS_PER_SECOND
    global PIXELS_PER_LINE
    num_syscalls = len(syscalls)
    metrics = Metrics()
    metrics.width = PLOT_WIDTH
    last_timestamp = syscalls[num_syscalls - 1].timestamp
    time_height = int(math.ceil(last_timestamp * PIXELS_PER_SECOND))
    line_height = num_syscalls * PIXELS_PER_LINE
    if time_height > line_height:
        # Time axis is taller: spread the log rows over the full height.
        metrics.height = time_height
        print "Adjusting PIXELS_PER_LINE = %d" % PIXELS_PER_LINE
        PIXELS_PER_LINE = metrics.height / num_syscalls
        print "  PIXELS_PER_LINE = %d" % PIXELS_PER_LINE
    else:
        # Log column is taller: stretch the time axis to match it.
        metrics.height = line_height
        print "Adjusting PIXELS_PER_SECOND %d" % PIXELS_PER_SECOND
        PIXELS_PER_SECOND = int(math.ceil(metrics.height / last_timestamp))
        print "  PIXELS_PER_SECOND %d" % PIXELS_PER_SECOND
    text_ypos = 0
    for syscall in syscalls:
        # True position on the time axis vs. evenly spaced text row.
        syscall.timestamp_ypos = syscall.timestamp * PIXELS_PER_SECOND
        syscall.log_ypos = text_ypos + FONT_SIZE
        text_ypos += PIXELS_PER_LINE
    return metrics
def plot_time_scale(surface, ctx, metrics):
    """Draw one tick plus an "<n> s" label per second down the left edge.

    ``surface`` is unused but kept for symmetry with the other plot_*
    helpers.  NOTE(review): the rounded-up division below is Python 2
    integer division; under Python 3 it would hand a float to range().
    """
    num_seconds = (metrics.height + PIXELS_PER_SECOND - 1) / PIXELS_PER_SECOND
    ctx.set_source_rgb(0.5, 0.5, 0.5)
    ctx.set_line_width(1.0)
    for i in range(num_seconds):
        ypos = i * PIXELS_PER_SECOND
        # The 0.5 offset centres the 1-pixel line on the pixel grid.
        ctx.move_to(0, ypos + 0.5)
        ctx.line_to(TIME_SCALE_WIDTH, ypos + 0.5)
        ctx.stroke()
        ctx.move_to(0, ypos + 2 + FONT_SIZE)
        ctx.show_text("%d s" % i)
def plot_syscall(surface, ctx, syscall):
    """Draw one mark: a leader line from the time axis to its log text.

    ``surface`` is unused but kept for signature symmetry.
    """
    ctx.set_source_rgb(*syscall.colors)

    # Leader line from the mark's true position on the time axis across to
    # the row where its log text is printed.
    y_time = syscall.timestamp_ypos
    y_text = syscall.log_ypos - FONT_SIZE / 2 + 0.5
    ctx.move_to(TIME_SCALE_WIDTH, y_time)
    ctx.line_to(TIME_SCALE_WIDTH + SYSCALL_MARKER_WIDTH, y_time)
    ctx.line_to(LOG_TEXT_XPOS - LOG_MARKER_WIDTH, y_text)
    ctx.line_to(LOG_TEXT_XPOS, y_text)
    ctx.stroke()

    # Timestamp followed by the log message itself.
    ctx.move_to(LOG_TEXT_XPOS, syscall.log_ypos)
    ctx.show_text("%8.5f: %s" % (syscall.timestamp, syscall.log))
def plot_syscalls_to_surface(syscalls, metrics):
    """Render background, time scale and all marks; return the surface."""
    # NOTE(review): num_syscalls is computed but never used.
    num_syscalls = len(syscalls)
    print 'picture size: %d x %d' % (metrics.width, metrics.height);
    surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
                                 metrics.width, metrics.height)
    ctx = cairo.Context(surface)
    ctx.select_font_face(FONT_NAME)
    ctx.set_font_size(FONT_SIZE)
    # Background
    ctx.set_source_rgb (*BACKGROUND_COLOR)
    ctx.rectangle(0, 0, metrics.width, metrics.height)
    ctx.fill()
    # Time scale
    plot_time_scale(surface, ctx, metrics)
    # Contents
    ctx.set_line_width(1.0)
    for syscall in syscalls:
        plot_syscall(surface, ctx, syscall)
    return surface
def main(args):
    """CLI entry point: parse options, read the log, write the PNG.

    Returns 0 on success, 1 on a usage error or when no marks were parsed.
    """
    global skip_lines
    global max_lines
    global filter_regex
    global skip_regex
    option_parser = optparse.OptionParser(
        usage="usage: %prog -o output.png <debug.log>")
    option_parser.add_option("-o",
                             "--output", dest="output",
                             metavar="FILE",
                             help="Name of output file (output is a PNG file)")
    option_parser.add_option("-s",
                             "--skip", dest="skip",
                             metavar="LINES",
                             help="Skip a number of loglines at the beginning of the file or wait till a regular expression happens")
    option_parser.add_option("-m",
                             "--max-lines", dest="max",
                             help="max lines that need to be plotted")
    option_parser.add_option("-f",
                             "--filter", dest="filter",
                             help="filter the log lines on a regular expression")
    options, args = option_parser.parse_args()
    if not options.output:
        print 'Please specify an output filename with "-o file.png" or "--output=file.png".'
        return 1
    if len(args) != 1:
        print 'Please specify only one input filename, which is an debug log taken with "GST_DEBUG_COLOR_MODE=off GST_DEBUG=XXX <application>"'
        return 1
    in_filename = args[0]
    out_filename = options.output
    # --skip takes either an integer (line count) or a regex to wait for.
    # NOTE(review): the bare except below swallows every exception type;
    # it should catch ValueError from int() only.
    if options.skip:
        try:
            skip_lines = int(options.skip)
        except:
            skip_regex = re.compile(options.skip)
            skip_lines = 0
    if options.max:
        max_lines = int(options.max)
    if options.filter:
        filter_regex = re.compile(options.filter)
    # A FirstMark discards everything collected so far (itself included);
    # a LastMark stops collection.
    syscalls = []
    for syscall in parse_strace(in_filename):
        syscalls.append(syscall)
        if isinstance(syscall, FirstMark):
            syscalls = []
        elif isinstance(syscall, LastMark):
            break
    if not syscalls:
        print 'No logs in %s' % in_filename
        return 1
    normalize_timestamps(syscalls)
    metrics = compute_syscall_metrics(syscalls)
    surface = plot_syscalls_to_surface(syscalls, metrics)
    surface.write_to_png(out_filename)
    return 0
if __name__ == "__main__":
    # Exit with main()'s return code (0 on success, 1 on errors).
    sys.exit(main(sys.argv))
| 27.939683 | 143 | 0.614021 |
cee8f76ff69a5353235bde241610ac6aaff7ca27 | 868 | py | Python | src/terra/api_lcd.py | terra-dashboard/staketaxcsv | 5793105488bf799c61aee64a45f44e9ae8fef397 | [
"MIT"
] | null | null | null | src/terra/api_lcd.py | terra-dashboard/staketaxcsv | 5793105488bf799c61aee64a45f44e9ae8fef397 | [
"MIT"
] | null | null | null | src/terra/api_lcd.py | terra-dashboard/staketaxcsv | 5793105488bf799c61aee64a45f44e9ae8fef397 | [
"MIT"
] | null | null | null | """
LCD documentation:
* https://lcd.terra.dev/swagger/#/
* https://github.com/terra-money/terra.py/tree/main/terra_sdk/client/lcd/api
"""
import logging
import time
from urllib.parse import urlencode
import requests
from settings_csv import TERRA_LCD_NODE
class LcdAPI:
    """Thin client for the Terra LCD REST endpoint.

    A single requests.Session is shared by all calls so HTTP connections
    are pooled and reused.
    """

    session = requests.Session()

    @classmethod
    def contract_info(cls, contract):
        """Fetch the metadata document for a smart-contract address."""
        logging.info("Querying lcd for contract = %s ...", contract)
        return cls._query("/wasm/contracts/{}".format(contract), {})

    @classmethod
    def _query(cls, uri_path, query_params, sleep_seconds=1):
        """GET ``uri_path`` with ``query_params`` and return the JSON body.

        Sleeps ``sleep_seconds`` after every request as crude rate limiting.
        """
        url = f"{TERRA_LCD_NODE}{uri_path}"
        logging.info("Requesting url %s?%s", url, urlencode(query_params))
        reply = cls.session.get(url, params=query_params)
        time.sleep(sleep_seconds)
        return reply.json()
| 26.30303 | 77 | 0.675115 |
8332b308a368d8b09433ba45a6b56b0e439dcbbc | 4,985 | py | Python | funtool_ipro_processes/adaptors/sequence_import.py | ActiveLearningLab/funtool-ipro-processes | 35547048282d6f39868223fda2da5d3af07af0cf | [
"MIT"
] | null | null | null | funtool_ipro_processes/adaptors/sequence_import.py | ActiveLearningLab/funtool-ipro-processes | 35547048282d6f39868223fda2da5d3af07af0cf | [
"MIT"
] | null | null | null | funtool_ipro_processes/adaptors/sequence_import.py | ActiveLearningLab/funtool-ipro-processes | 35547048282d6f39868223fda2da5d3af07af0cf | [
"MIT"
] | null | null | null | import funtool.adaptor
import funtool.state
import funtool.state_collection
import pymysql
import pymysql.cursors
import yaml
import os
import datetime
import math
def sequence_import(adaptor, state_collection, overriding_parameters=None, logging=None):
    """Import per-user characterization sequences from a MySQL revision table.

    Reads revisions between the 'start_time' and 'stop_time' adaptor
    parameters (format "%Y-%m-%d %H:%M:%S"), buckets them into fixed-width
    slices of 'time_step' seconds and builds one characterization sequence
    per user (missing slices carry the previous value forward, see
    _create_sequences).

    Returns a new StateCollection with one state per user; the incoming
    ``state_collection`` is not used.  ``logging`` is accepted for adaptor
    interface compatibility and ignored.
    """
    adaptor_parameters = funtool.adaptor.get_adaptor_parameters(adaptor, overriding_parameters)
    mysql_query = _prepare_query(adaptor_parameters)
    connection = _open_connection(_connection_values(adaptor_parameters.get('db_connection')))
    start_time = datetime.datetime.strptime(adaptor_parameters.get('start_time'), "%Y-%m-%d %H:%M:%S")
    stop_time = datetime.datetime.strptime(adaptor_parameters.get('stop_time'), "%Y-%m-%d %H:%M:%S")
    time_step = int(adaptor_parameters.get('time_step'))
    time_slices = math.ceil((stop_time - start_time).total_seconds() / time_step)
    user_revisions = {}
    try:
        for row in _mysql_row(connection, mysql_query):
            user = row.get(adaptor_parameters.get('user_column', 'username'))
            characterization = row.get(adaptor_parameters.get('characterization_column', 'characterization'))
            # BUGFIX: 'created_at' is the default *column name* (matching the
            # two lookups above); previously the misplaced parenthesis made
            # it the row default, so a missing 'time_column' parameter would
            # subtract start_time from the string 'created_at'.
            row_time = row.get(adaptor_parameters.get('time_column', 'created_at'))
            time_slice = int((row_time - start_time).total_seconds()) // time_step
            user_revisions.setdefault(user, {}).setdefault(time_slice, []).append(characterization)
    finally:
        connection.close()
    user_sequences = _create_sequences(user_revisions, time_slices, adaptor_parameters.get('trim_leading_none', False))
    return funtool.state_collection.StateCollection(states=_create_states_from_sequences(user_sequences), groupings={})
def _open_connection(connection_values):
    # Open a pymysql connection; ``connection_values`` is the keyword dict
    # built by _connection_values() and is splatted into pymysql.connect().
    return pymysql.connect( **connection_values)
def _connection_values(adaptor_db_connection_parameters):
    """Build the keyword-argument dict for pymysql.connect().

    Starts from an optional YAML config file (key 'config_file' in the
    input dict), overlays the explicit adaptor parameters on top of those
    defaults, and forces DictCursor so query results come back as dicts.

    Args:
        adaptor_db_connection_parameters: dict of connection settings; may
            include 'config_file' pointing at a YAML mapping of defaults.

    Returns:
        dict suitable for splatting into pymysql.connect().
    """
    conn_parameters = adaptor_db_connection_parameters.copy()
    config_file = conn_parameters.pop('config_file', None)
    yaml_config = None
    if config_file is not None:
        with open(config_file) as f:
            # safe_load: a config file should never be able to construct
            # arbitrary Python objects (yaml.load without a Loader is both
            # unsafe and rejected by modern PyYAML).
            yaml_config = yaml.safe_load(f)
    if yaml_config is None:
        yaml_config = {}
    yaml_config.update(conn_parameters)
    conn_parameters = yaml_config
    # Rows come back as dicts, which sequence_import relies on (row.get).
    conn_parameters['cursorclass'] = pymysql.cursors.DictCursor
    return conn_parameters
def _mysql_row(connection, sql):
with connection.cursor() as cur:
cur.execute(sql)
results= cur.fetchall()
for result in results:
yield result
def _prepare_query(adaptor_parameters):
mysql_query= adaptor_parameters.get('SQL')
if mysql_query is not None:
where_clauses=[]
order_by_clause= None
if adaptor_parameters.get('teams',False):
where_clauses.append( '\nOR '.join([
adaptor_parameters.get('user_column','username') + ' LIKE \'' +team+'%\''
for team in adaptor_parameters.get('teams',[])]))
if adaptor_parameters.get('start_time',False):
where_clauses.append(adaptor_parameters.get('full_time_column')+' > \'' + adaptor_parameters.get('start_time')+'\'')
if adaptor_parameters.get('stop_time',False):
where_clauses.append(adaptor_parameters.get('full_time_column')+' < \'' + adaptor_parameters.get('stop_time') +'\'')
if adaptor_parameters.get('order_by'):
order_by_clause ='ORDER BY ' + ', '.join(adaptor_parameters.get('order_by'))
if len(where_clauses) > 0:
mysql_query+= '\nWHERE '
mysql_query+= 'AND '.join( [ '(' + clause + ')\n' for clause in where_clauses ] )
if not(order_by_clause is None):
mysql_query+= order_by_clause
return mysql_query
def _create_sequences(user_timeslice_characterization, time_slices, trim_common_none=False):
user_sequences= {}
for user in user_timeslice_characterization.keys():
user_sequences[user]= [ user_timeslice_characterization[user].get(0,[None])[-1] ]
for time_slice in range(1,time_slices):
user_sequences[user]= user_sequences[user] + [ user_timeslice_characterization[user].get(time_slice,user_sequences[user])[-1] ]
if trim_common_none:
print('trimming')
first_not_none= min([ next( i for i,c in enumerate(sequence) if not( c is None) ) for sequence in user_sequences.values()])
user_sequences= { user:sequence[first_not_none:] for user,sequence in user_sequences.items()}
return user_sequences
def _create_states_from_sequences(user_sequences):
    """Wrap each (user, sequence) pair in a bare funtool State."""
    states = []
    for user, sequence in user_sequences.items():
        states.append(funtool.state.State(
            id=None,
            data={'sequence': sequence, 'username': user},
            measures={},
            meta={},
            groupings={}))
    return states
| 45.318182 | 139 | 0.693079 |
49280a106f4820635fa50743e1dfa0b1cd09eac3 | 4,388 | py | Python | rax/_src/types.py | google/rax | d6370d574246db9fb0566317f7cac8cd331526d7 | [
"Apache-2.0"
] | 19 | 2022-01-25T12:37:51.000Z | 2022-03-30T17:12:45.000Z | rax/_src/types.py | google/rax | d6370d574246db9fb0566317f7cac8cd331526d7 | [
"Apache-2.0"
] | 1 | 2022-02-08T23:02:42.000Z | 2022-02-08T23:02:42.000Z | rax/_src/types.py | google/rax | d6370d574246db9fb0566317f7cac8cd331526d7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rax-specific types and protocols.
.. note::
Types and protocols are provided for **type-checking** convenience only. You
do **not** need to instantiate, subclass or extend them.
"""
from typing import Optional, Tuple, Union
import jax
# Protocol is a python 3.8+ feature. For older versions, we can use
# typing_extensions, which provides the same functionality.
try:
from typing import Protocol # pylint: disable=g-import-not-at-top
except ImportError:
from typing_extensions import Protocol # pylint: disable=g-import-not-at-top
# Type alias for a JAX array.
Array = jax.numpy.ndarray
class RankFn(Protocol):
  """:class:`typing.Protocol` for rank functions."""

  def __call__(self, scores: Array, where: Optional[Array],
               key: Optional[Array]) -> Array:
    """Assigns a 1-based rank to every entry of ``scores``.

    Args:
      scores: The array of scores to rank.
      where: An optional mask of the same shape as ``scores``; entries that
        are masked out are ranked after all other elements.
      key: An optional :func:`~jax.random.PRNGKey` consumed by any random
        operations (e.g. tie-breaking).

    Returns:
      A :class:`jax.numpy.ndarray` of the same shape as ``scores`` holding
      the 1-based rank of each element.
    """
    ...
class CutoffFn(Protocol):
  """:class:`typing.Protocol` for cutoff functions."""

  def __call__(self, a: Array, n: Optional[int]) -> Array:
    """Selects which elements of ``a`` fall inside a top-n cutoff.

    Args:
      a: The array to compute the cutoffs for.
      n: The cutoff position.

    Returns:
      A binary :class:`jax.numpy.ndarray` of the same shape as ``a``
      marking the elements selected by the top-n cutoff.
    """
    ...
class ReduceFn(Protocol):
  """:class:`typing.Protocol` for reduce functions."""

  def __call__(self, a: Array, where: Optional[Array],
               axis: Optional[Union[int, Tuple[int, ...]]]) -> Array:
    """Reduces ``a`` over one or more dimensions.

    Args:
      a: The array to be reduced.
      where: An optional mask of the same shape as ``a`` selecting the
        elements that take part in the reduction.
      axis: The axis or axes to reduce over; ``None`` reduces over all
        available axes.

    Returns:
      A :class:`jax.numpy.ndarray` holding the reduction of ``a`` over the
      given ``axis``.
    """
    ...
class LossFn(Protocol):
  """:class:`typing.Protocol` for loss functions."""

  def __call__(self, scores: Array, labels: Array, *,
               where: Optional[Array], **kwargs) -> Array:
    """Evaluates a ranking loss.

    Args:
      scores: The per-item scores.
      labels: The per-item labels.
      where: An optional mask of the same shape as ``scores`` selecting
        the elements that contribute to the loss.
      **kwargs: Additional loss-specific keyword arguments.

    Returns:
      A :class:`jax.numpy.ndarray` with the loss computed on the given
      ``scores`` and ``labels``.
    """
    ...
class MetricFn(Protocol):
  """:class:`typing.Protocol` for metric functions."""

  def __call__(self, scores: Array, labels: Array, *,
               where: Optional[Array], **kwargs) -> Array:
    """Evaluates a ranking metric.

    Args:
      scores: The per-item scores.
      labels: The per-item labels.
      where: An optional mask of the same shape as ``scores`` selecting
        the elements that contribute to the metric.
      **kwargs: Additional metric-specific keyword arguments.

    Returns:
      A :class:`jax.numpy.ndarray` with the metric computed on the given
      ``scores`` and ``labels``.
    """
    ...
| 31.797101 | 80 | 0.670693 |
4a13c6a1a7547673d9cd98ad4bb31e9ead66b1ac | 4,126 | py | Python | benchmark/startQiskit_noisy1054.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1054.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1054.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=47
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the +/- phase oracle Zf for the boolean function ``f``.

    For every n-bit string on which ``f`` returns "1", the qubits that are
    0 in that string are flipped with X gates so the multi-controlled
    pi-phase (a generalised CZ via mcu1) fires exactly on that basis
    state; the X gates are then undone.
    """
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        flipped = [j for j in range(n) if bits[j] == "0"]
        for j in flipped:
            oracle.x(controls[j])
        if n >= 2:
            oracle.mcu1(pi, controls[1:], controls[0])
        for j in flipped:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build a Grover-style search circuit over ``n`` qubits.

    Applies the Zf phase oracle floor(sqrt(2^n)*pi/4) times, interleaved
    with an auto-generated gate sequence (the "# number=..." tags are the
    generator's gate ids); measures every qubit at the end.  The exact
    gate order is the benchmark payload -- do not reorder.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0]) # number=41
    prog.cz(input_qubit[1],input_qubit[0]) # number=42
    prog.h(input_qubit[0]) # number=43
    prog.z(input_qubit[1]) # number=37
    prog.cx(input_qubit[1],input_qubit[0]) # number=38
    prog.h(input_qubit[4]) # number=21
    prog.cx(input_qubit[0],input_qubit[2]) # number=44
    prog.x(input_qubit[2]) # number=45
    prog.cx(input_qubit[0],input_qubit[2]) # number=46
    Zf = build_oracle(n, f)
    # Standard Grover iteration count.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[3],input_qubit[0]) # number=33
        prog.z(input_qubit[3]) # number=34
        prog.x(input_qubit[4]) # number=40
        prog.cx(input_qubit[3],input_qubit[0]) # number=35
        prog.x(input_qubit[0]) # number=9
        prog.cx(input_qubit[0],input_qubit[1]) # number=29
        prog.x(input_qubit[1]) # number=30
        prog.cx(input_qubit[0],input_qubit[1]) # number=31
        prog.x(input_qubit[2]) # number=11
        prog.x(input_qubit[3]) # number=12
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[0]) # number=25
        prog.cx(input_qubit[1],input_qubit[0]) # number=26
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.y(input_qubit[1]) # number=32
        prog.x(input_qubit[1]) # number=23
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Oracle marks exactly one basis state: the all-zeros key.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    backend = FakeVigo()
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # NOTE(review): prefer a ``with open(...)`` context manager so the file
    # is closed even if one of the writes raises.
    writefile = open("../data/startQiskit_noisy1054.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 31.257576 | 82 | 0.618032 |
ab4d33eb50262c8bb65e58d3388be65b6e89f82b | 2,221 | py | Python | fiesta/fiesta/nuFlux.py | rodrigruiz/super-duper-fiesta | f09de41f6016fdef0790b783fba9111961e5e33c | [
"MIT"
] | null | null | null | fiesta/fiesta/nuFlux.py | rodrigruiz/super-duper-fiesta | f09de41f6016fdef0790b783fba9111961e5e33c | [
"MIT"
] | null | null | null | fiesta/fiesta/nuFlux.py | rodrigruiz/super-duper-fiesta | f09de41f6016fdef0790b783fba9111961e5e33c | [
"MIT"
] | null | null | null | import numpy as np
def read_flux_file(f):
    """
    Read atmospheric neutrino fluxes from an ascii file.

    The file is organised in 5 whitespace-separated columns:
    column1 -- log10(E/GeV)
    column2 -- F(nu_e)/(s^-1*m^-2)
    column3 -- F(nu_mu)/(s^-1*m^-2)
    column4 -- F(nu_e_bar)/(s^-1*m^-2)
    column5 -- F(nu_mu_bar)/(s^-1*m^-2)

    Keyword arguments:
    f -- the path to the file

    Returns a dictionary (fluxes converted to s^-1*cm^-2*sr^-1):
    d["log_E"] -- log10(E/GeV)
    d["E"]     -- E/GeV
    d["nu_e"], d["nu_mu"], d["anu_e"], d["anu_mu"] -- converted fluxes
    """
    units_factor = 1.e-4/(4*np.pi)  # m^-2 -> cm^-2, full solid angle -> sr^-1
    log_e, nue, numu, nuebar, numubar = ([] for i in range(5))
    # Context manager guarantees the handle is closed even when a line is
    # malformed; split() (rather than split(' ')) tolerates repeated spaces
    # or tabs, and blank lines are skipped instead of raising.
    with open(f, "r") as flux_file:
        for line in flux_file:
            columns = line.split()
            if not columns:
                continue
            log_e.append(float(columns[0]))
            nue.append(float(columns[1])*units_factor)
            numu.append(float(columns[2])*units_factor)
            nuebar.append(float(columns[3])*units_factor)
            numubar.append(float(columns[4])*units_factor)
    d = {}
    d["log_E"] = np.array(log_e)
    d["E"] = np.power(10, np.array(log_e))
    d["nu_e"] = np.array(nue)
    d["nu_mu"] = np.array(numu)
    d["anu_e"] = np.array(nuebar)
    d["anu_mu"] = np.array(numubar)
    return d
def astro_flux(e, phi, gamma, e0, c0):
    """
    Astrophysical neutrino flux in units of s^-1*cm^-2*sr^-1, following the
    single power-law parameterisation of https://arxiv.org/pdf/2001.09520.pdf.

    Keyword arguments:
    e -- energy
    phi -- flux normalisation
    gamma -- spectral index
    e0 -- reference energy
    c0 -- reference normalisation
    """
    scaled_energy = e / e0
    return c0 * phi * np.power(scaled_energy, -gamma)
def atmospheric_flux(e, phi, gamma):
    """
    Atmospheric neutrino flux parametrised as a simple power law, in units
    of s^-1*cm^-2*sr^-1.

    Keyword arguments:
    e -- energy
    phi -- flux normalisation
    gamma -- spectral index
    """
    return phi * np.power(e, -gamma)
| 32.188406 | 158 | 0.620891 |
c3c86d37e2301b330cfa9c55a6dc58ca4716592f | 74 | py | Python | samples/run_bbands.py | tibkiss/pyalgotrade | 4979315281c362dcba2e6d53da27dc4a7377ebec | [
"Apache-2.0"
] | 2 | 2015-04-03T10:29:14.000Z | 2017-01-21T05:55:00.000Z | samples/run_bbands.py | tibkiss/pyalgotrade | 4979315281c362dcba2e6d53da27dc4a7377ebec | [
"Apache-2.0"
] | null | null | null | samples/run_bbands.py | tibkiss/pyalgotrade | 4979315281c362dcba2e6d53da27dc4a7377ebec | [
"Apache-2.0"
] | null | null | null | import sys
# Make the bundled sample strategies importable as top-level modules.
sys.path.append('samples')
import bbands
# Run the Bollinger Bands sample; the False argument presumably disables
# plotting -- confirm against bbands.main's signature.
bbands.main(False)
| 9.25 | 26 | 0.756757 |
de36df902fc4d98c4ba9fbbcd0cb485250ed432e | 1,616 | py | Python | mindspore/ops/_op_impl/tbe/div.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | mindspore/ops/_op_impl/tbe/div.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 1 | 2020-12-29T06:46:38.000Z | 2020-12-29T06:46:38.000Z | mindspore/ops/_op_impl/tbe/div.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Div op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
# TBE registration record for the elementwise Div operator: two required
# inputs (x1, x2) and one output (y), broadcastable ("broadcast" pattern),
# with matching dtype triples for int8/uint8/int32/float16/float32.
div_op_info = TBERegOp("Div") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("div.so") \
    .compute_cost(10) \
    .kernel_name("div") \
    .partial_flag(True) \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .op_pattern("broadcast") \
    .dtype_format(DataType.I8_None, DataType.I8_None, DataType.I8_None) \
    .dtype_format(DataType.U8_None, DataType.U8_None, DataType.U8_None) \
    .dtype_format(DataType.I32_None, DataType.I32_None, DataType.I32_None) \
    .dtype_format(DataType.F16_None, DataType.F16_None, DataType.F16_None) \
    .dtype_format(DataType.F32_None, DataType.F32_None, DataType.F32_None) \
    .get_op_info()
@op_info_register(div_op_info)
def _div_tbe():
    """Div TBE register: the decorator performs the actual registration of
    div_op_info; the function body is intentionally a no-op."""
    return
| 38.47619 | 79 | 0.679455 |
c410285b244864f2308d26b162b393ab2a29a959 | 359 | py | Python | pythonClass.py | AlisonGurgel/pythonNotes | 963f6c9a7d24471b37ec7f27c4337a8d7540ca1e | [
"CC0-1.0"
] | null | null | null | pythonClass.py | AlisonGurgel/pythonNotes | 963f6c9a7d24471b37ec7f27c4337a8d7540ca1e | [
"CC0-1.0"
] | null | null | null | pythonClass.py | AlisonGurgel/pythonNotes | 963f6c9a7d24471b37ec7f27c4337a8d7540ca1e | [
"CC0-1.0"
class Car:
  """Minimal demo car that reports its actions on stdout.

  The public surface is kept intact: the carName / carYear / attFunc
  attributes and the printing getYear() / start() methods.
  """

  def __init__(self, n, y):
    """Store name and year, then print the "<name> \\_/ <year>" banner."""
    self.carName = n
    self.carYear = y
    # "\\_/" prints the same characters as the original "\_/" literal but
    # avoids the invalid-escape DeprecationWarning.
    print(self.carName, "\\_/", self.carYear)
    # Historical attribute: it used to be bound to print()'s return value,
    # which is always None; kept for callers that access car.attFunc.
    self.attFunc = None

  def getYear(self):
    """Print the model year (returns None, as before)."""
    print(self.carYear)

  def start(self, km):
    """Print that the car starts moving at ``km`` km/h."""
    print(f"{self.carName} start moving at {km}kmh")
# Demo: the constructor prints its banner; the trailing bare attribute
# access on the last line has no effect.
c1 = Car("BMW", 1990)
c1.start(10)
c1.getYear()
c1.attFunc
| 19.944444 | 63 | 0.573816 |
241d9ded8a5c7ad9e75d56166fa6eb1fa3243de0 | 5,302 | py | Python | scripts/testing/test_model_load.py | latika-bhurani/fk-visual-search | 6061d75b00163774135cf9297f44b4bbcc6b62df | [
"Apache-2.0"
] | 1 | 2018-12-19T17:46:58.000Z | 2018-12-19T17:46:58.000Z | scripts/testing/test_model_load.py | latika-bhurani/fk-visual-search | 6061d75b00163774135cf9297f44b4bbcc6b62df | [
"Apache-2.0"
] | null | null | null | scripts/testing/test_model_load.py | latika-bhurani/fk-visual-search | 6061d75b00163774135cf9297f44b4bbcc6b62df | [
"Apache-2.0"
] | null | null | null | import base64
# import h
import numpy as np
import io
from PIL import Image
import keras
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Dense
from keras.layers import BatchNormalization
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.models import Model
# from keras import backend as K
from flask import jsonify
from flask import Flask
from flask import request
import tensorflow as tf
import keras.backend as K
from keras.layers import concatenate, Flatten, Input, Lambda
from keras.applications.vgg16 import preprocess_input
# from keras.applications import ResNet50
from keras.preprocessing.image import img_to_array
# from keras.applications import imagenet_utils
from keras.models import load_model
# from PIL import Image
# import numpy as np
import flask
# import io
import h5py
def triplet_loss(_, y_pred):
    """Hinge-style triplet loss on stacked pair distances.

    Assumes y_pred[:, 0, 0] is the anchor-positive distance and
    y_pred[:, 1, 0] the anchor-negative distance (matching the accuracy
    metric below) -- confirm against the model's output layout.
    """
    margin = K.constant(1)
    hinge = K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin
    return K.mean(K.maximum(K.constant(0), hinge))
def accuracy(_, y_pred):
    # Fraction of triplets where the first stacked distance is smaller than
    # the second (presumably anchor-positive vs. anchor-negative, matching
    # triplet_loss above -- confirm against the model's output layout).
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # Unit L2-normalisation along the last axis; registered in
    # custom_objects below so load_model() can deserialise the layer.
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of batched vectors.

    Clamps the squared sum at K.epsilon() before the sqrt to avoid NaN
    gradients at zero distance.
    """
    first, second = vects
    squared_sum = K.sum(K.square(first - second), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared_sum, K.epsilon()))
# Load the trained triplet network; the custom loss/metric/lambda functions
# defined above must be supplied so Keras can deserialize the saved graph.
print('Loading Model')
my_model = load_model('fashion_lens_triplet_model.h5', custom_objects={'l2Norm': l2Norm, 'triplet_loss':triplet_loss, 'accuracy':accuracy, 'euclidean_distance':euclidean_distance})
print(" * Model Loaded! ")
# Load and preprocess a single query image at the network's 224x224 input size.
query_img_path = '.\\structured_images\\wtbi_skirts_query_crop_256\\3.jpg'
query_img = image.load_img(query_img_path, target_size=(224, 224))
query_img_data = image.img_to_array(query_img)
# Add a leading batch axis, then apply VGG16-style preprocessing.
query_img_data = np.expand_dims(query_img_data, axis=0)
query_img_data = preprocess_input(query_img_data)
print(query_img_data.shape)
# Random stand-ins for the positive/negative branches (unused by the
# feature-extraction path below).
dummy_pos = np.random.randn(*query_img_data.shape)
dummy_neg = np.random.randn(*query_img_data.shape)
# b = np.random.randn(*a.shape)
# print(b.shape)
# print(my_model.predict([query_img_data, dummy_pos, dummy_neg]))
# [<tf.Tensor 'query_input:0' shape=(?, 224, 224, 3) dtype=float32>, <tf.Tensor 'positive_input:0' shape=(?, 224, 224, 3) dtype=float32>, <tf.Tensor 'negative_input:0' shape=(?, 224, 224, 3) dtype=float32>]
print(my_model.get_input_at(0)[0])
# vizNet_model.get_input_at(0)
## extract the feature vector
model_name = 'vizNet_model'
layer_name = 'viznet_embedding'
# extract_feature_model = Model(inputs=my_model.get_input_at(0)[0],
#                               outputs=my_model.get_layer(model_name).get_layer(layer_name).output)
# Build a sub-model from the shared branch's input to its embedding layer so a
# single image can be embedded without the triplet scaffolding.
extract_feature_model = Model(inputs=my_model.get_layer(model_name).get_input_at(0),
                              outputs=my_model.get_layer(model_name).get_layer(layer_name).output)
# feature_vector = my_model.predict([query_img_data, dummy_pos, dummy_neg])
feature_vector = extract_feature_model.predict(query_img_data)
print(feature_vector)
print(feature_vector.shape)
print(feature_vector.tolist())
# Layer (type) Output Shape Param # Connected to
# ==================================================================================================
# query_input (InputLayer) (None, 224, 224, 3) 0
# __________________________________________________________________________________________________
# positive_input (InputLayer) (None, 224, 224, 3) 0
# __________________________________________________________________________________________________
# negative_input (InputLayer) (None, 224, 224, 3) 0
# __________________________________________________________________________________________________
# vizNet_model (Model) (None, 4096) 164846038 query_input[0][0]
# positive_input[0][0]
# negative_input[0][0]
# __________________________________________________________________________________________________
# pos_dist (Lambda) (None, 1) 0 vizNet_model[1][0]
# vizNet_model[2][0]
# __________________________________________________________________________________________________
# neg_dist (Lambda) (None, 1) 0 vizNet_model[1][0]
# vizNet_model[3][0]
# __________________________________________________________________________________________________
# stacked_dists (Lambda) (None, 2, 1) 0 pos_dist[0][0]
# neg_dist[0][0]
# =============================================================================================== | 47.339286 | 207 | 0.620709 |
d91fca0120368a2cb04a9679818504a34b2207b2 | 2,101 | py | Python | examples/dice.py | jvanstraten/openql-ci-test | a8ff9c8c30991ff4ac81551a698a7e1c40471910 | [
"Apache-2.0"
] | 1 | 2021-11-10T11:45:57.000Z | 2021-11-10T11:45:57.000Z | examples/dice.py | jvanstraten/openql-ci-test | a8ff9c8c30991ff4ac81551a698a7e1c40471910 | [
"Apache-2.0"
] | 6 | 2020-03-26T16:59:23.000Z | 2021-04-30T07:51:12.000Z | examples/dice.py | jvanstraten/openql-ci-test | a8ff9c8c30991ff4ac81551a698a7e1c40471910 | [
"Apache-2.0"
] | 1 | 2022-02-21T22:07:04.000Z | 2022-02-21T22:07:04.000Z | from openql import openql as ql
import qxelarator
from functools import reduce
import os
import matplotlib.pyplot as plt
# Module-level OpenQL configuration: compiled artifacts (including the QASM
# read back by the execute functions below) go to ./test_output.
curdir = os.path.dirname(__file__)
output_dir = os.path.join(curdir, 'test_output')
ql.set_option('output_dir', output_dir)
ql.set_option('write_qasm_files', 'yes')
ql.set_option('scheduler', 'ASAP')
ql.set_option('log_level', 'LOG_INFO')
# Three qubits => 2^3 = 8 equally likely outcomes, one per die face.
nqubits = 3
def dice_compile():
    """Build and compile the 3-qubit dice circuit (H on each qubit, then
    measure all), writing QASM to the configured output directory."""
    print('compiling 8-face dice program by openql')
    config_path = os.path.join(curdir, '../tests/hardware_config_qx.json')
    platform = ql.Platform("myPlatform", config_path)
    program = ql.Program('dice', platform, nqubits)
    kernel = ql.Kernel('aKernel', platform, nqubits)
    for qubit in range(nqubits):
        kernel.gate('h', [qubit])
    for qubit in range(nqubits):
        kernel.gate('measure', [qubit])
    program.add_kernel(kernel)
    program.compile()
def plot_histogram(dice_faces):
    """Plot a histogram of observed dice faces and save it to hist.png.

    Args:
        dice_faces: iterable of integer outcomes (1-8) to bin.
    """
    plt.hist(dice_faces, bins=8, color='#0504aa',alpha=0.7, rwidth=0.85)
    plt.grid(axis='y', alpha=0.75)
    plt.xlabel('Dice Face',fontsize=15)
    plt.ylabel('Frequency',fontsize=15)  # duplicate ylabel call removed
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.title('Histogram',fontsize=15)
    # Save BEFORE show(): closing the window shown by a blocking plt.show()
    # destroys the figure, so the original savefig-after-show wrote an empty
    # hist.png.
    plt.savefig('hist.png')
    plt.show()
def dice_execute_singleshot():
    """Run the compiled dice circuit once on QX and print the face rolled."""
    print('executing 8-face dice program on qxelarator')
    qx = qxelarator.QX()
    qx.set('test_output/dice.qasm')
    qx.execute()
    bits = [int(qx.get_measurement_outcome(q)) for q in range(nqubits)]
    # Interpret the measured bits as a binary number, then shift to 1..8.
    face = reduce(lambda acc, bit: 2 * acc + bit, bits, 0) + 1
    print('Dice face : {}'.format(face))
def dice_execute_multishot():
    """Run the dice circuit 100 times on QX and plot the face histogram."""
    print('executing 8-face dice program on qxelarator')
    qx = qxelarator.QX()
    qx.set('test_output/dice.qasm')
    ntests = 100
    faces = []
    for _ in range(ntests):
        qx.execute()
        bits = [int(qx.get_measurement_outcome(q)) for q in range(nqubits)]
        # Binary bits -> integer, shifted to the 1..8 face range.
        faces.append(reduce(lambda acc, bit: 2 * acc + bit, bits, 0) + 1)
    plot_histogram(faces)
if __name__ == '__main__':
    # Compile first: the execute step reads the QASM written to
    # test_output/dice.qasm by dice_compile().
    dice_compile()
    dice_execute_singleshot()
    # dice_execute_multishot()
| 28.391892 | 74 | 0.671109 |
21686540e82d4ecf18dc58d9ec797496b29cc5d7 | 539,374 | py | Python | google/ads/google_ads/v3/services/enums.py | xflesym/google-ads-python | 11abbea977345d3352b0c1efcea6f4312b25df30 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v3/services/enums.py | xflesym/google-ads-python | 11abbea977345d3352b0c1efcea6f4312b25df30 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v3/services/enums.py | xflesym/google-ads-python | 11abbea977345d3352b0c1efcea6f4312b25df30 | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class _CreateEnumTypeUponFirstAccess(object):
r"""A lazy container class for a single capitalized Enum attribute.
It will exec code to define the Enum type within the instance dict upon
first access. Reducing import time for a module full of Enum definitions.
Example:
>>> class ThingHolder(_CreateEnumTypeUponFirstAccess):
... Thing = '''\
... Thing = '''\
class Thing(enum.IntEnum):
... GOOD = 0
... BAD = 1
... UNKNOWN = 2
... '''
...
>>> ThingHolder = ThingHolder() # The instance enables the magic.
This avoids executing the slow Python enum.Enum metaclass construction code
at import time. Instead, the first time ThingHolder.Thing is accessed, the
code to define the enum type will be run and replace the Thing attribute on
ThingHolder with the instantiated Thing type.
>>> ThingHolder
<ThingHolder object at 0x...>
>>> dir(ThingHolder)
['Thing']
>>> ThingHolder.Thing.UNKNOWN # Triggers the actual creation of Thing.
<Thing.UNKNOWN: 2>
>>> ThingHolder.Thing.GOOD
<Thing.GOOD: 0>
>>> ThingHolder
<class 'ThingHolder'>
The import time enum type creation cost is deferred. The runtime cost upon
first access will be a bit higher. Under the assumption that most
applications never access the majority of the enum types, the reduction in
startup time makes for a nicer experience.
"""
def __getattribute__(self, name):
# We use type(self) instead of self.__class__ to avoid recursing.
names = [k for k in type(self).__dict__ if k[0].isupper()]
assert len(names) <= 1, names
if not names:
# Race condition, another thread finished executing this before us.
return getattr(self, enum_type_name)
enum_type_name = names[0]
if name == enum_type_name:
try:
class_def_src = getattr(type(self), enum_type_name)
except AttributeError:
# The only way this should happen is if another thread was also
# executing this method at the same time and finished defining
# the enum class on our instance and deleting its source from
# the class before our own call got this far.
# Assume our method is gone and revert to a regular instance
# attribute lookup to find our class.
assert not hasattr(type(self), '__getattribute__')
return getattr(self, enum_type_name)
if not isinstance(class_def_src, str):
# If the class attribute has been replaced with something else,
# another must have already executed it and created our type.
return class_def_src
assert class_def_src.startswith('class %s(enum.' % enum_type_name), 'Expected ' + enum_type_name
# It is possible for multiple threads to wind up doing this exec at
# the same time. That'll create multiple identical types and assign
# them into the instance dict under the same name. One of them will
# "win" the final spot in class dict. BUT each simultaneous
# "first" call may wind up returning an "obsolete" type... Not
# great, but as they're IntEnum classes, nobody should be using
# the class type in type checks. Add a lock if this bothers you.
six.exec_(class_def_src, {'enum': enum},
object.__getattribute__(self, '__dict__'))
# We've done our job, get out of the way forever.
type(self).__getattribute__ = object.__getattribute__
enum_type = getattr(self, enum_type_name)
# No need for the enum class source anymore.
###delattr(type(self), enum_type_name)
# ...
# These two statements are overkill and need rewriting if this base
# class were moved to a utility library or used from outside this
# file as globals() refers to the scope in the current file. If
# you get rid of these, uncomment the delattr above to still
# release some memory.
setattr(type(self), enum_type_name, enum_type)
# Get rid of our instance singleton now that its job is done.
# (obsoletes the reassigning of __getattribute__ above)
globals()[type(self).__name__] = type(self)
# ...
return enum_type
else:
raise AttributeError('%r has no attribute %r' %
(type(self).__name__, name))
class AccessInvitationErrorEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AccessInvitationError = '''\
class AccessInvitationError(enum.IntEnum):
    """
    Enum describing possible AccessInvitation errors.

    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      INVALID_EMAIL_ADDRESS (int): The email address is invalid for sending an invitation.
      EMAIL_ADDRESS_ALREADY_HAS_ACCESS (int): Email address already has access to this customer.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_EMAIL_ADDRESS = 2
    EMAIL_ADDRESS_ALREADY_HAS_ACCESS = 3
'''


AccessInvitationErrorEnum = AccessInvitationErrorEnum()  # For __getattribute__
class AccessReasonEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AccessReason = '''\
class AccessReason(enum.IntEnum):
    """
    Enum describing possible access reasons.

    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      OWNED (int): The resource is owned by the user.
      SHARED (int): The resource is shared to the user.
      LICENSED (int): The resource is licensed to the user.
      SUBSCRIBED (int): The user subscribed to the resource.
      AFFILIATED (int): The resource is accessible to the user.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    OWNED = 2
    SHARED = 3
    LICENSED = 4
    SUBSCRIBED = 5
    AFFILIATED = 6
'''


AccessReasonEnum = AccessReasonEnum()  # For __getattribute__
class AccessRoleEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AccessRole = '''\
class AccessRole(enum.IntEnum):
    """
    Possible access role of a user.

    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ADMIN (int): Owns its account and can control the addition of other users.
      STANDARD (int): Can modify campaigns, but can't affect other users.
      READ_ONLY (int): Can view campaigns and account changes, but cannot make edits.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ADMIN = 2
    STANDARD = 3
    READ_ONLY = 4
'''


AccessRoleEnum = AccessRoleEnum()  # For __getattribute__
class AccountBudgetProposalErrorEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AccountBudgetProposalError = '''\
class AccountBudgetProposalError(enum.IntEnum):
    """
    Enum describing possible account budget proposal errors.

    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      FIELD_MASK_NOT_ALLOWED (int): The field mask must be empty for create/end/remove proposals.
      IMMUTABLE_FIELD (int): The field cannot be set because of the proposal type.
      REQUIRED_FIELD_MISSING (int): The field is required because of the proposal type.
      CANNOT_CANCEL_APPROVED_PROPOSAL (int): Proposals that have been approved cannot be cancelled.
      CANNOT_REMOVE_UNAPPROVED_BUDGET (int): Budgets that haven't been approved cannot be removed.
      CANNOT_REMOVE_RUNNING_BUDGET (int): Budgets that are currently running cannot be removed.
      CANNOT_END_UNAPPROVED_BUDGET (int): Budgets that haven't been approved cannot be truncated.
      CANNOT_END_INACTIVE_BUDGET (int): Only budgets that are currently running can be truncated.
      BUDGET_NAME_REQUIRED (int): All budgets must have names.
      CANNOT_UPDATE_OLD_BUDGET (int): Expired budgets cannot be edited after a sufficient amount of time has
      passed.
      CANNOT_END_IN_PAST (int): It is not permissible a propose a new budget that ends in the past.
      CANNOT_EXTEND_END_TIME (int): An expired budget cannot be extended to overlap with the running budget.
      PURCHASE_ORDER_NUMBER_REQUIRED (int): A purchase order number is required.
      PENDING_UPDATE_PROPOSAL_EXISTS (int): Budgets that have a pending update cannot be updated.
      MULTIPLE_BUDGETS_NOT_ALLOWED_FOR_UNAPPROVED_BILLING_SETUP (int): Cannot propose more than one budget when the corresponding billing setup
      hasn't been approved.
      CANNOT_UPDATE_START_TIME_FOR_STARTED_BUDGET (int): Cannot update the start time of a budget that has already started.
      SPENDING_LIMIT_LOWER_THAN_ACCRUED_COST_NOT_ALLOWED (int): Cannot update the spending limit of a budget with an amount lower than
      what has already been spent.
      UPDATE_IS_NO_OP (int): Cannot propose a budget update without actually changing any fields.
      END_TIME_MUST_FOLLOW_START_TIME (int): The end time must come after the start time.
      BUDGET_DATE_RANGE_INCOMPATIBLE_WITH_BILLING_SETUP (int): The budget's date range must fall within the date range of its billing
      setup.
      NOT_AUTHORIZED (int): The user is not authorized to mutate budgets for the given billing setup.
      INVALID_BILLING_SETUP (int): Mutates are not allowed for the given billing setup.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    FIELD_MASK_NOT_ALLOWED = 2
    IMMUTABLE_FIELD = 3
    REQUIRED_FIELD_MISSING = 4
    CANNOT_CANCEL_APPROVED_PROPOSAL = 5
    CANNOT_REMOVE_UNAPPROVED_BUDGET = 6
    CANNOT_REMOVE_RUNNING_BUDGET = 7
    CANNOT_END_UNAPPROVED_BUDGET = 8
    CANNOT_END_INACTIVE_BUDGET = 9
    BUDGET_NAME_REQUIRED = 10
    CANNOT_UPDATE_OLD_BUDGET = 11
    CANNOT_END_IN_PAST = 12
    CANNOT_EXTEND_END_TIME = 13
    PURCHASE_ORDER_NUMBER_REQUIRED = 14
    PENDING_UPDATE_PROPOSAL_EXISTS = 15
    MULTIPLE_BUDGETS_NOT_ALLOWED_FOR_UNAPPROVED_BILLING_SETUP = 16
    CANNOT_UPDATE_START_TIME_FOR_STARTED_BUDGET = 17
    SPENDING_LIMIT_LOWER_THAN_ACCRUED_COST_NOT_ALLOWED = 18
    UPDATE_IS_NO_OP = 19
    END_TIME_MUST_FOLLOW_START_TIME = 20
    BUDGET_DATE_RANGE_INCOMPATIBLE_WITH_BILLING_SETUP = 21
    NOT_AUTHORIZED = 22
    INVALID_BILLING_SETUP = 23
'''


AccountBudgetProposalErrorEnum = AccountBudgetProposalErrorEnum()  # For __getattribute__
class AccountBudgetProposalStatusEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AccountBudgetProposalStatus = '''\
class AccountBudgetProposalStatus(enum.IntEnum):
    """
    The possible statuses of an AccountBudgetProposal.

    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PENDING (int): The proposal is pending approval.
      APPROVED_HELD (int): The proposal has been approved but the corresponding billing setup
      has not. This can occur for proposals that set up the first budget
      when signing up for billing or when performing a change of bill-to
      operation.
      APPROVED (int): The proposal has been approved.
      CANCELLED (int): The proposal has been cancelled by the user.
      REJECTED (int): The proposal has been rejected by the user, e.g. by rejecting an
      acceptance email.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PENDING = 2
    APPROVED_HELD = 3
    APPROVED = 4
    CANCELLED = 5
    REJECTED = 6
'''


AccountBudgetProposalStatusEnum = AccountBudgetProposalStatusEnum()  # For __getattribute__
class AccountBudgetProposalTypeEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AccountBudgetProposalType = '''\
class AccountBudgetProposalType(enum.IntEnum):
    """
    The possible types of an AccountBudgetProposal.

    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      CREATE (int): Identifies a request to create a new budget.
      UPDATE (int): Identifies a request to edit an existing budget.
      END (int): Identifies a request to end a budget that has already started.
      REMOVE (int): Identifies a request to remove a budget that hasn't started yet.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CREATE = 2
    UPDATE = 3
    END = 4
    REMOVE = 5
'''


AccountBudgetProposalTypeEnum = AccountBudgetProposalTypeEnum()  # For __getattribute__
class AccountBudgetStatusEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AccountBudgetStatus = '''\
class AccountBudgetStatus(enum.IntEnum):
    """
    The possible statuses of an AccountBudget.

    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PENDING (int): The account budget is pending approval.
      APPROVED (int): The account budget has been approved.
      CANCELLED (int): The account budget has been cancelled by the user.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PENDING = 2
    APPROVED = 3
    CANCELLED = 4
'''


AccountBudgetStatusEnum = AccountBudgetStatusEnum()  # For __getattribute__
class AdCustomizerErrorEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AdCustomizerError = '''\
class AdCustomizerError(enum.IntEnum):
    """
    Enum describing possible ad customizer errors.

    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      COUNTDOWN_INVALID_DATE_FORMAT (int): Invalid date argument in countdown function.
      COUNTDOWN_DATE_IN_PAST (int): Countdown end date is in the past.
      COUNTDOWN_INVALID_LOCALE (int): Invalid locale string in countdown function.
      COUNTDOWN_INVALID_START_DAYS_BEFORE (int): Days-before argument to countdown function is not positive.
      UNKNOWN_USER_LIST (int): A user list referenced in an IF function does not exist.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    COUNTDOWN_INVALID_DATE_FORMAT = 2
    COUNTDOWN_DATE_IN_PAST = 3
    COUNTDOWN_INVALID_LOCALE = 4
    COUNTDOWN_INVALID_START_DAYS_BEFORE = 5
    UNKNOWN_USER_LIST = 6
'''


AdCustomizerErrorEnum = AdCustomizerErrorEnum()  # For __getattribute__
class AdCustomizerPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # The attribute must carry the inner enum's name (no "Enum" suffix) and
    # sit inside the class body so the lazy loader's name/prefix checks pass.
    AdCustomizerPlaceholderField = '''\
class AdCustomizerPlaceholderField(enum.IntEnum):
    """
    Possible values for Ad Customizers placeholder fields.

    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      INTEGER (int): Data Type: INT64. Integer value to be inserted.
      PRICE (int): Data Type: STRING. Price value to be inserted.
      DATE (int): Data Type: DATE\_TIME. Date value to be inserted.
      STRING (int): Data Type: STRING. String value to be inserted.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INTEGER = 2
    PRICE = 3
    DATE = 4
    STRING = 5
'''


AdCustomizerPlaceholderFieldEnum = AdCustomizerPlaceholderFieldEnum()  # For __getattribute__
class AdErrorEnum(_CreateEnumTypeUponFirstAccess):
AdErrorEnum = '''\
class AdError(enum.IntEnum):
"""
Enum describing possible ad errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
AD_CUSTOMIZERS_NOT_SUPPORTED_FOR_AD_TYPE (int): Ad customizers are not supported for ad type.
APPROXIMATELY_TOO_LONG (int): Estimating character sizes the string is too long.
APPROXIMATELY_TOO_SHORT (int): Estimating character sizes the string is too short.
BAD_SNIPPET (int): There is a problem with the snippet.
CANNOT_MODIFY_AD (int): Cannot modify an ad.
CANNOT_SET_BUSINESS_NAME_IF_URL_SET (int): business name and url cannot be set at the same time
CANNOT_SET_FIELD (int): The specified field is incompatible with this ad's type or settings.
CANNOT_SET_FIELD_WITH_ORIGIN_AD_ID_SET (int): Cannot set field when originAdId is set.
CANNOT_SET_FIELD_WITH_AD_ID_SET_FOR_SHARING (int): Cannot set field when an existing ad id is set for sharing.
CANNOT_SET_ALLOW_FLEXIBLE_COLOR_FALSE (int): Cannot set allowFlexibleColor false if no color is provided by user.
CANNOT_SET_COLOR_CONTROL_WHEN_NATIVE_FORMAT_SETTING (int): When user select native, no color control is allowed because we will
always respect publisher color for native format serving.
CANNOT_SET_URL (int): Cannot specify a url for the ad type
CANNOT_SET_WITHOUT_FINAL_URLS (int): Cannot specify a tracking or mobile url without also setting final urls
CANNOT_SET_WITH_FINAL_URLS (int): Cannot specify a legacy url and a final url simultaneously
CANNOT_SET_WITH_URL_DATA (int): Cannot specify a urls in UrlData and in template fields simultaneously.
CANNOT_USE_AD_SUBCLASS_FOR_OPERATOR (int): This operator cannot be used with a subclass of Ad.
CUSTOMER_NOT_APPROVED_MOBILEADS (int): Customer is not approved for mobile ads.
CUSTOMER_NOT_APPROVED_THIRDPARTY_ADS (int): Customer is not approved for 3PAS richmedia ads.
CUSTOMER_NOT_APPROVED_THIRDPARTY_REDIRECT_ADS (int): Customer is not approved for 3PAS redirect richmedia (Ad Exchange) ads.
CUSTOMER_NOT_ELIGIBLE (int): Not an eligible customer
CUSTOMER_NOT_ELIGIBLE_FOR_UPDATING_BEACON_URL (int): Customer is not eligible for updating beacon url
DIMENSION_ALREADY_IN_UNION (int): There already exists an ad with the same dimensions in the union.
DIMENSION_MUST_BE_SET (int): Ad's dimension must be set before setting union dimension.
DIMENSION_NOT_IN_UNION (int): Ad's dimension must be included in the union dimensions.
DISPLAY_URL_CANNOT_BE_SPECIFIED (int): Display Url cannot be specified (applies to Ad Exchange Ads)
DOMESTIC_PHONE_NUMBER_FORMAT (int): Telephone number contains invalid characters or invalid format. Please
re-enter your number using digits (0-9), dashes (-), and parentheses
only.
EMERGENCY_PHONE_NUMBER (int): Emergency telephone numbers are not allowed. Please enter a valid
domestic phone number to connect customers to your business.
EMPTY_FIELD (int): A required field was not specified or is an empty string.
FEED_ATTRIBUTE_MUST_HAVE_MAPPING_FOR_TYPE_ID (int): A feed attribute referenced in an ad customizer tag is not in the ad
customizer mapping for the feed.
FEED_ATTRIBUTE_MAPPING_TYPE_MISMATCH (int): The ad customizer field mapping for the feed attribute does not match the
expected field type.
ILLEGAL_AD_CUSTOMIZER_TAG_USE (int): The use of ad customizer tags in the ad text is disallowed. Details in
trigger.
ILLEGAL_TAG_USE (int): Tags of the form {PH\_x}, where x is a number, are disallowed in ad
text.
INCONSISTENT_DIMENSIONS (int): The dimensions of the ad are specified or derived in multiple ways and
are not consistent.
INCONSISTENT_STATUS_IN_TEMPLATE_UNION (int): The status cannot differ among template ads of the same union.
INCORRECT_LENGTH (int): The length of the string is not valid.
INELIGIBLE_FOR_UPGRADE (int): The ad is ineligible for upgrade.
INVALID_AD_ADDRESS_CAMPAIGN_TARGET (int): User cannot create mobile ad for countries targeted in specified
campaign.
INVALID_AD_TYPE (int): Invalid Ad type. A specific type of Ad is required.
INVALID_ATTRIBUTES_FOR_MOBILE_IMAGE (int): Headline, description or phone cannot be present when creating mobile
image ad.
INVALID_ATTRIBUTES_FOR_MOBILE_TEXT (int): Image cannot be present when creating mobile text ad.
INVALID_CALL_TO_ACTION_TEXT (int): Invalid call to action text.
INVALID_CHARACTER_FOR_URL (int): Invalid character in URL.
INVALID_COUNTRY_CODE (int): Creative's country code is not valid.
INVALID_EXPANDED_DYNAMIC_SEARCH_AD_TAG (int): Invalid use of Expanded Dynamic Search Ads tags ({lpurl} etc.)
INVALID_INPUT (int): An input error whose real reason was not properly mapped (should not
happen).
INVALID_MARKUP_LANGUAGE (int): An invalid markup language was entered.
INVALID_MOBILE_CARRIER (int): An invalid mobile carrier was entered.
INVALID_MOBILE_CARRIER_TARGET (int): Specified mobile carriers target a country not targeted by the campaign.
INVALID_NUMBER_OF_ELEMENTS (int): Wrong number of elements for given element type
INVALID_PHONE_NUMBER_FORMAT (int): The format of the telephone number is incorrect. Please re-enter the
number using the correct format.
INVALID_RICH_MEDIA_CERTIFIED_VENDOR_FORMAT_ID (int): The certified vendor format id is incorrect.
INVALID_TEMPLATE_DATA (int): The template ad data contains validation errors.
INVALID_TEMPLATE_ELEMENT_FIELD_TYPE (int): The template field doesn't have have the correct type.
INVALID_TEMPLATE_ID (int): Invalid template id.
LINE_TOO_WIDE (int): After substituting replacement strings, the line is too wide.
MISSING_AD_CUSTOMIZER_MAPPING (int): The feed referenced must have ad customizer mapping to be used in a
customizer tag.
MISSING_ADDRESS_COMPONENT (int): Missing address component in template element address field.
MISSING_ADVERTISEMENT_NAME (int): An ad name must be entered.
MISSING_BUSINESS_NAME (int): Business name must be entered.
MISSING_DESCRIPTION1 (int): Description (line 2) must be entered.
MISSING_DESCRIPTION2 (int): Description (line 3) must be entered.
MISSING_DESTINATION_URL_TAG (int): The destination url must contain at least one tag (e.g. {lpurl})
MISSING_LANDING_PAGE_URL_TAG (int): The tracking url template of ExpandedDynamicSearchAd must contain at
least one tag. (e.g. {lpurl})
MISSING_DIMENSION (int): A valid dimension must be specified for this ad.
MISSING_DISPLAY_URL (int): A display URL must be entered.
MISSING_HEADLINE (int): Headline must be entered.
MISSING_HEIGHT (int): A height must be entered.
MISSING_IMAGE (int): An image must be entered.
MISSING_MARKETING_IMAGE_OR_PRODUCT_VIDEOS (int): Marketing image or product videos are required.
MISSING_MARKUP_LANGUAGES (int): The markup language in which your site is written must be entered.
MISSING_MOBILE_CARRIER (int): A mobile carrier must be entered.
MISSING_PHONE (int): Phone number must be entered.
MISSING_REQUIRED_TEMPLATE_FIELDS (int): Missing required template fields
MISSING_TEMPLATE_FIELD_VALUE (int): Missing a required field value
MISSING_TEXT (int): The ad must have text.
MISSING_VISIBLE_URL (int): A visible URL must be entered.
MISSING_WIDTH (int): A width must be entered.
MULTIPLE_DISTINCT_FEEDS_UNSUPPORTED (int): Only 1 feed can be used as the source of ad customizer substitutions in a
single ad.
MUST_USE_TEMP_AD_UNION_ID_ON_ADD (int): TempAdUnionId must be use when adding template ads.
TOO_LONG (int): The string has too many characters.
TOO_SHORT (int): The string has too few characters.
UNION_DIMENSIONS_CANNOT_CHANGE (int): Ad union dimensions cannot change for saved ads.
UNKNOWN_ADDRESS_COMPONENT (int): Address component is not {country, lat, lng}.
UNKNOWN_FIELD_NAME (int): Unknown unique field name
UNKNOWN_UNIQUE_NAME (int): Unknown unique name (template element type specifier)
UNSUPPORTED_DIMENSIONS (int): Unsupported ad dimension
URL_INVALID_SCHEME (int): URL starts with an invalid scheme.
URL_INVALID_TOP_LEVEL_DOMAIN (int): URL ends with an invalid top-level domain name.
URL_MALFORMED (int): URL contains illegal characters.
URL_NO_HOST (int): URL must contain a host name.
URL_NOT_EQUIVALENT (int): URL not equivalent during upgrade.
URL_HOST_NAME_TOO_LONG (int): URL host name too long to be stored as visible URL (applies to Ad
Exchange ads)
URL_NO_SCHEME (int): URL must start with a scheme.
URL_NO_TOP_LEVEL_DOMAIN (int): URL should end in a valid domain extension, such as .com or .net.
URL_PATH_NOT_ALLOWED (int): URL must not end with a path.
URL_PORT_NOT_ALLOWED (int): URL must not specify a port.
URL_QUERY_NOT_ALLOWED (int): URL must not contain a query.
URL_SCHEME_BEFORE_EXPANDED_DYNAMIC_SEARCH_AD_TAG (int): A url scheme is not allowed in front of tag in tracking url template
(e.g. http://{lpurl})
USER_DOES_NOT_HAVE_ACCESS_TO_TEMPLATE (int): The user does not have permissions to create a template ad for the given
template.
INCONSISTENT_EXPANDABLE_SETTINGS (int): Expandable setting is inconsistent/wrong. For example, an AdX ad is
invalid if it has a expandable vendor format but no expanding directions
specified, or expanding directions is specified, but the vendor format is
not expandable.
INVALID_FORMAT (int): Format is invalid
INVALID_FIELD_TEXT (int): The text of this field did not match a pattern of allowed values.
ELEMENT_NOT_PRESENT (int): Template element is mising
IMAGE_ERROR (int): Error occurred during image processing
VALUE_NOT_IN_RANGE (int): The value is not within the valid range
FIELD_NOT_PRESENT (int): Template element field is not present
ADDRESS_NOT_COMPLETE (int): Address is incomplete
ADDRESS_INVALID (int): Invalid address
VIDEO_RETRIEVAL_ERROR (int): Error retrieving specified video
AUDIO_ERROR (int): Error processing audio
INVALID_YOUTUBE_DISPLAY_URL (int): Display URL is incorrect for YouTube PYV ads
TOO_MANY_PRODUCT_IMAGES (int): Too many product Images in GmailAd
TOO_MANY_PRODUCT_VIDEOS (int): Too many product Videos in GmailAd
INCOMPATIBLE_AD_TYPE_AND_DEVICE_PREFERENCE (int): The device preference is not compatible with the ad type
CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY (int): Call tracking is not supported for specified country.
CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED (int): Carrier specific short number is not allowed.
DISALLOWED_NUMBER_TYPE (int): Specified phone number type is disallowed.
PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY (int): Phone number not supported for country.
PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY (int): Phone number not supported with call tracking enabled for country.
PREMIUM_RATE_NUMBER_NOT_ALLOWED (int): Premium rate phone number is not allowed.
VANITY_PHONE_NUMBER_NOT_ALLOWED (int): Vanity phone number is not allowed.
INVALID_CALL_CONVERSION_TYPE_ID (int): Invalid call conversion type id.
CANNOT_DISABLE_CALL_CONVERSION_AND_SET_CONVERSION_TYPE_ID (int): Cannot disable call conversion and set conversion type id.
CANNOT_SET_PATH2_WITHOUT_PATH1 (int): Cannot set path2 without path1.
MISSING_DYNAMIC_SEARCH_ADS_SETTING_DOMAIN_NAME (int): Missing domain name in campaign setting when adding expanded dynamic
search ad.
INCOMPATIBLE_WITH_RESTRICTION_TYPE (int): The associated ad is not compatible with restriction type.
CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED (int): Consent for call recording is required for creating/updating call only
ads. Please see https://support.google.com/google-ads/answer/7412639.
MISSING_IMAGE_OR_MEDIA_BUNDLE (int): Either an image or a media bundle is required in a display upload ad.
PRODUCT_TYPE_NOT_SUPPORTED_IN_THIS_CAMPAIGN (int): The display upload product type is not supported in this campaign.
PLACEHOLDER_CANNOT_HAVE_EMPTY_DEFAULT_VALUE (int): The default value of an ad placeholder can not be the empty string.
PLACEHOLDER_COUNTDOWN_FUNCTION_CANNOT_HAVE_DEFAULT_VALUE (int): Ad placeholders with countdown functions must not have a default value.
PLACEHOLDER_DEFAULT_VALUE_MISSING (int): A previous ad placeholder that had a default value was found which means
that all (non-countdown) placeholders must have a default value. This
ad placeholder does not have a default value.
UNEXPECTED_PLACEHOLDER_DEFAULT_VALUE (int): A previous ad placeholder that did not have a default value was found
which means that no placeholders may have a default value. This
ad placeholder does have a default value.
AD_CUSTOMIZERS_MAY_NOT_BE_ADJACENT (int): Two ad customizers may not be directly adjacent in an ad text. They must
be separated by at least one character.
UPDATING_AD_WITH_NO_ENABLED_ASSOCIATION (int): The ad is not associated with any enabled AdGroupAd, and cannot be
updated.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD_CUSTOMIZERS_NOT_SUPPORTED_FOR_AD_TYPE = 2
APPROXIMATELY_TOO_LONG = 3
APPROXIMATELY_TOO_SHORT = 4
BAD_SNIPPET = 5
CANNOT_MODIFY_AD = 6
CANNOT_SET_BUSINESS_NAME_IF_URL_SET = 7
CANNOT_SET_FIELD = 8
CANNOT_SET_FIELD_WITH_ORIGIN_AD_ID_SET = 9
CANNOT_SET_FIELD_WITH_AD_ID_SET_FOR_SHARING = 10
CANNOT_SET_ALLOW_FLEXIBLE_COLOR_FALSE = 11
CANNOT_SET_COLOR_CONTROL_WHEN_NATIVE_FORMAT_SETTING = 12
CANNOT_SET_URL = 13
CANNOT_SET_WITHOUT_FINAL_URLS = 14
CANNOT_SET_WITH_FINAL_URLS = 15
CANNOT_SET_WITH_URL_DATA = 17
CANNOT_USE_AD_SUBCLASS_FOR_OPERATOR = 18
CUSTOMER_NOT_APPROVED_MOBILEADS = 19
CUSTOMER_NOT_APPROVED_THIRDPARTY_ADS = 20
CUSTOMER_NOT_APPROVED_THIRDPARTY_REDIRECT_ADS = 21
CUSTOMER_NOT_ELIGIBLE = 22
CUSTOMER_NOT_ELIGIBLE_FOR_UPDATING_BEACON_URL = 23
DIMENSION_ALREADY_IN_UNION = 24
DIMENSION_MUST_BE_SET = 25
DIMENSION_NOT_IN_UNION = 26
DISPLAY_URL_CANNOT_BE_SPECIFIED = 27
DOMESTIC_PHONE_NUMBER_FORMAT = 28
EMERGENCY_PHONE_NUMBER = 29
EMPTY_FIELD = 30
FEED_ATTRIBUTE_MUST_HAVE_MAPPING_FOR_TYPE_ID = 31
FEED_ATTRIBUTE_MAPPING_TYPE_MISMATCH = 32
ILLEGAL_AD_CUSTOMIZER_TAG_USE = 33
ILLEGAL_TAG_USE = 34
INCONSISTENT_DIMENSIONS = 35
INCONSISTENT_STATUS_IN_TEMPLATE_UNION = 36
INCORRECT_LENGTH = 37
INELIGIBLE_FOR_UPGRADE = 38
INVALID_AD_ADDRESS_CAMPAIGN_TARGET = 39
INVALID_AD_TYPE = 40
INVALID_ATTRIBUTES_FOR_MOBILE_IMAGE = 41
INVALID_ATTRIBUTES_FOR_MOBILE_TEXT = 42
INVALID_CALL_TO_ACTION_TEXT = 43
INVALID_CHARACTER_FOR_URL = 44
INVALID_COUNTRY_CODE = 45
INVALID_EXPANDED_DYNAMIC_SEARCH_AD_TAG = 47
INVALID_INPUT = 48
INVALID_MARKUP_LANGUAGE = 49
INVALID_MOBILE_CARRIER = 50
INVALID_MOBILE_CARRIER_TARGET = 51
INVALID_NUMBER_OF_ELEMENTS = 52
INVALID_PHONE_NUMBER_FORMAT = 53
INVALID_RICH_MEDIA_CERTIFIED_VENDOR_FORMAT_ID = 54
INVALID_TEMPLATE_DATA = 55
INVALID_TEMPLATE_ELEMENT_FIELD_TYPE = 56
INVALID_TEMPLATE_ID = 57
LINE_TOO_WIDE = 58
MISSING_AD_CUSTOMIZER_MAPPING = 59
MISSING_ADDRESS_COMPONENT = 60
MISSING_ADVERTISEMENT_NAME = 61
MISSING_BUSINESS_NAME = 62
MISSING_DESCRIPTION1 = 63
MISSING_DESCRIPTION2 = 64
MISSING_DESTINATION_URL_TAG = 65
MISSING_LANDING_PAGE_URL_TAG = 66
MISSING_DIMENSION = 67
MISSING_DISPLAY_URL = 68
MISSING_HEADLINE = 69
MISSING_HEIGHT = 70
MISSING_IMAGE = 71
MISSING_MARKETING_IMAGE_OR_PRODUCT_VIDEOS = 72
MISSING_MARKUP_LANGUAGES = 73
MISSING_MOBILE_CARRIER = 74
MISSING_PHONE = 75
MISSING_REQUIRED_TEMPLATE_FIELDS = 76
MISSING_TEMPLATE_FIELD_VALUE = 77
MISSING_TEXT = 78
MISSING_VISIBLE_URL = 79
MISSING_WIDTH = 80
MULTIPLE_DISTINCT_FEEDS_UNSUPPORTED = 81
MUST_USE_TEMP_AD_UNION_ID_ON_ADD = 82
TOO_LONG = 83
TOO_SHORT = 84
UNION_DIMENSIONS_CANNOT_CHANGE = 85
UNKNOWN_ADDRESS_COMPONENT = 86
UNKNOWN_FIELD_NAME = 87
UNKNOWN_UNIQUE_NAME = 88
UNSUPPORTED_DIMENSIONS = 89
URL_INVALID_SCHEME = 90
URL_INVALID_TOP_LEVEL_DOMAIN = 91
URL_MALFORMED = 92
URL_NO_HOST = 93
URL_NOT_EQUIVALENT = 94
URL_HOST_NAME_TOO_LONG = 95
URL_NO_SCHEME = 96
URL_NO_TOP_LEVEL_DOMAIN = 97
URL_PATH_NOT_ALLOWED = 98
URL_PORT_NOT_ALLOWED = 99
URL_QUERY_NOT_ALLOWED = 100
URL_SCHEME_BEFORE_EXPANDED_DYNAMIC_SEARCH_AD_TAG = 102
USER_DOES_NOT_HAVE_ACCESS_TO_TEMPLATE = 103
INCONSISTENT_EXPANDABLE_SETTINGS = 104
INVALID_FORMAT = 105
INVALID_FIELD_TEXT = 106
ELEMENT_NOT_PRESENT = 107
IMAGE_ERROR = 108
VALUE_NOT_IN_RANGE = 109
FIELD_NOT_PRESENT = 110
ADDRESS_NOT_COMPLETE = 111
ADDRESS_INVALID = 112
VIDEO_RETRIEVAL_ERROR = 113
AUDIO_ERROR = 114
INVALID_YOUTUBE_DISPLAY_URL = 115
TOO_MANY_PRODUCT_IMAGES = 116
TOO_MANY_PRODUCT_VIDEOS = 117
INCOMPATIBLE_AD_TYPE_AND_DEVICE_PREFERENCE = 118
CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 119
CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED = 120
DISALLOWED_NUMBER_TYPE = 121
PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY = 122
PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY = 123
PREMIUM_RATE_NUMBER_NOT_ALLOWED = 124
VANITY_PHONE_NUMBER_NOT_ALLOWED = 125
INVALID_CALL_CONVERSION_TYPE_ID = 126
CANNOT_DISABLE_CALL_CONVERSION_AND_SET_CONVERSION_TYPE_ID = 127
CANNOT_SET_PATH2_WITHOUT_PATH1 = 128
MISSING_DYNAMIC_SEARCH_ADS_SETTING_DOMAIN_NAME = 129
INCOMPATIBLE_WITH_RESTRICTION_TYPE = 130
CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED = 131
MISSING_IMAGE_OR_MEDIA_BUNDLE = 132
PRODUCT_TYPE_NOT_SUPPORTED_IN_THIS_CAMPAIGN = 133
PLACEHOLDER_CANNOT_HAVE_EMPTY_DEFAULT_VALUE = 134
PLACEHOLDER_COUNTDOWN_FUNCTION_CANNOT_HAVE_DEFAULT_VALUE = 135
PLACEHOLDER_DEFAULT_VALUE_MISSING = 136
UNEXPECTED_PLACEHOLDER_DEFAULT_VALUE = 137
AD_CUSTOMIZERS_MAY_NOT_BE_ADJACENT = 138
UPDATING_AD_WITH_NO_ENABLED_ASSOCIATION = 139
'''
AdErrorEnum = AdErrorEnum() # For __getattribute__
class AdGroupAdErrorEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: the class attribute below stores the AdGroupAdError
# IntEnum's source as a string. _CreateEnumTypeUponFirstAccess (defined
# elsewhere in this file) presumably compiles it into the real enum type on
# first attribute access -- TODO confirm against the helper's implementation.
AdGroupAdErrorEnum = '''\
class AdGroupAdError(enum.IntEnum):
"""
Enum describing possible ad group ad errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
AD_GROUP_AD_LABEL_DOES_NOT_EXIST (int): No link found between the adgroup ad and the label.
AD_GROUP_AD_LABEL_ALREADY_EXISTS (int): The label has already been attached to the adgroup ad.
AD_NOT_UNDER_ADGROUP (int): The specified ad was not found in the adgroup
CANNOT_OPERATE_ON_REMOVED_ADGROUPAD (int): Removed ads may not be modified
CANNOT_CREATE_DEPRECATED_ADS (int): An ad of this type is deprecated and cannot be created. Only deletions
are permitted.
CANNOT_CREATE_TEXT_ADS (int): Text ads are deprecated and cannot be created. Use expanded text ads
instead.
EMPTY_FIELD (int): A required field was not specified or is an empty string.
RESOURCE_REFERENCED_IN_MULTIPLE_OPS (int): An ad may only be modified once per call
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_AD_LABEL_DOES_NOT_EXIST = 2
AD_GROUP_AD_LABEL_ALREADY_EXISTS = 3
AD_NOT_UNDER_ADGROUP = 4
CANNOT_OPERATE_ON_REMOVED_ADGROUPAD = 5
CANNOT_CREATE_DEPRECATED_ADS = 6
CANNOT_CREATE_TEXT_ADS = 7
EMPTY_FIELD = 8
RESOURCE_REFERENCED_IN_MULTIPLE_OPS = 9
'''
# Replace the class with an instance so attribute lookups go through
# __getattribute__ (per the original trailing comment).
AdGroupAdErrorEnum = AdGroupAdErrorEnum() # For __getattribute__
class AdGroupAdRotationModeEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupAdRotationMode IntEnum source as a
# string; _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably
# materializes it on first access -- TODO confirm.
AdGroupAdRotationModeEnum = '''\
class AdGroupAdRotationMode(enum.IntEnum):
"""
The possible ad rotation modes of an ad group.
Attributes:
UNSPECIFIED (int): The ad rotation mode has not been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
OPTIMIZE (int): Optimize ad group ads based on clicks or conversions.
ROTATE_FOREVER (int): Rotate evenly forever.
"""
UNSPECIFIED = 0
UNKNOWN = 1
OPTIMIZE = 2
ROTATE_FOREVER = 3
'''
AdGroupAdRotationModeEnum = AdGroupAdRotationModeEnum() # For __getattribute__
class AdGroupAdStatusEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupAdStatus IntEnum source as a string;
# _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably materializes
# it on first access -- TODO confirm.
AdGroupAdStatusEnum = '''\
class AdGroupAdStatus(enum.IntEnum):
"""
The possible statuses of an AdGroupAd.
Attributes:
UNSPECIFIED (int): No value has been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
ENABLED (int): The ad group ad is enabled.
PAUSED (int): The ad group ad is paused.
REMOVED (int): The ad group ad is removed.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
PAUSED = 3
REMOVED = 4
'''
AdGroupAdStatusEnum = AdGroupAdStatusEnum() # For __getattribute__
class AdGroupBidModifierErrorEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupBidModifierError IntEnum source as a
# string; _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably
# materializes it on first access -- TODO confirm.
AdGroupBidModifierErrorEnum = '''\
class AdGroupBidModifierError(enum.IntEnum):
"""
Enum describing possible ad group bid modifier errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
CRITERION_ID_NOT_SUPPORTED (int): The criterion ID does not support bid modification.
CANNOT_OVERRIDE_OPTED_OUT_CAMPAIGN_CRITERION_BID_MODIFIER (int): Cannot override the bid modifier for the given criterion ID if the parent
campaign is opted out of the same criterion.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CRITERION_ID_NOT_SUPPORTED = 2
CANNOT_OVERRIDE_OPTED_OUT_CAMPAIGN_CRITERION_BID_MODIFIER = 3
'''
AdGroupBidModifierErrorEnum = AdGroupBidModifierErrorEnum() # For __getattribute__
class AdGroupCriterionApprovalStatusEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupCriterionApprovalStatus IntEnum
# source as a string; _CreateEnumTypeUponFirstAccess (defined elsewhere)
# presumably materializes it on first access -- TODO confirm.
AdGroupCriterionApprovalStatusEnum = '''\
class AdGroupCriterionApprovalStatus(enum.IntEnum):
"""
Enumerates AdGroupCriterion approval statuses.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): The value is unknown in this version.
APPROVED (int): Approved.
DISAPPROVED (int): Disapproved.
PENDING_REVIEW (int): Pending Review.
UNDER_REVIEW (int): Under review.
"""
UNSPECIFIED = 0
UNKNOWN = 1
APPROVED = 2
DISAPPROVED = 3
PENDING_REVIEW = 4
UNDER_REVIEW = 5
'''
AdGroupCriterionApprovalStatusEnum = AdGroupCriterionApprovalStatusEnum() # For __getattribute__
class AdGroupCriterionErrorEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupCriterionError IntEnum source as a
# string; _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably
# materializes it on first access -- TODO confirm. Note the numbering gap
# (17 -> 27) mirrors the upstream proto's reserved values; do not renumber.
AdGroupCriterionErrorEnum = '''\
class AdGroupCriterionError(enum.IntEnum):
"""
Enum describing possible ad group criterion errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
AD_GROUP_CRITERION_LABEL_DOES_NOT_EXIST (int): No link found between the AdGroupCriterion and the label.
AD_GROUP_CRITERION_LABEL_ALREADY_EXISTS (int): The label has already been attached to the AdGroupCriterion.
CANNOT_ADD_LABEL_TO_NEGATIVE_CRITERION (int): Negative AdGroupCriterion cannot have labels.
TOO_MANY_OPERATIONS (int): Too many operations for a single call.
CANT_UPDATE_NEGATIVE (int): Negative ad group criteria are not updateable.
CONCRETE_TYPE_REQUIRED (int): Concrete type of criterion (keyword v.s. placement) is required for ADD
and SET operations.
BID_INCOMPATIBLE_WITH_ADGROUP (int): Bid is incompatible with ad group's bidding settings.
CANNOT_TARGET_AND_EXCLUDE (int): Cannot target and exclude the same criterion at once.
ILLEGAL_URL (int): The URL of a placement is invalid.
INVALID_KEYWORD_TEXT (int): Keyword text was invalid.
INVALID_DESTINATION_URL (int): Destination URL was invalid.
MISSING_DESTINATION_URL_TAG (int): The destination url must contain at least one tag (e.g. {lpurl})
KEYWORD_LEVEL_BID_NOT_SUPPORTED_FOR_MANUALCPM (int): Keyword-level cpm bid is not supported
INVALID_USER_STATUS (int): For example, cannot add a biddable ad group criterion that had been
removed.
CANNOT_ADD_CRITERIA_TYPE (int): Criteria type cannot be targeted for the ad group. Either the account is
restricted to keywords only, the criteria type is incompatible with the
campaign's bidding strategy, or the criteria type can only be applied to
campaigns.
CANNOT_EXCLUDE_CRITERIA_TYPE (int): Criteria type cannot be excluded for the ad group. Refer to the
documentation for a specific criterion to check if it is excludable.
CAMPAIGN_TYPE_NOT_COMPATIBLE_WITH_PARTIAL_FAILURE (int): Partial failure is not supported for shopping campaign mutate operations.
OPERATIONS_FOR_TOO_MANY_SHOPPING_ADGROUPS (int): Operations in the mutate request changes too many shopping ad groups.
Please split requests for multiple shopping ad groups across multiple
requests.
CANNOT_MODIFY_URL_FIELDS_WITH_DUPLICATE_ELEMENTS (int): Not allowed to modify url fields of an ad group criterion if there are
duplicate elements for that ad group criterion in the request.
CANNOT_SET_WITHOUT_FINAL_URLS (int): Cannot set url fields without also setting final urls.
CANNOT_CLEAR_FINAL_URLS_IF_FINAL_MOBILE_URLS_EXIST (int): Cannot clear final urls if final mobile urls exist.
CANNOT_CLEAR_FINAL_URLS_IF_FINAL_APP_URLS_EXIST (int): Cannot clear final urls if final app urls exist.
CANNOT_CLEAR_FINAL_URLS_IF_TRACKING_URL_TEMPLATE_EXISTS (int): Cannot clear final urls if tracking url template exists.
CANNOT_CLEAR_FINAL_URLS_IF_URL_CUSTOM_PARAMETERS_EXIST (int): Cannot clear final urls if url custom parameters exist.
CANNOT_SET_BOTH_DESTINATION_URL_AND_FINAL_URLS (int): Cannot set both destination url and final urls.
CANNOT_SET_BOTH_DESTINATION_URL_AND_TRACKING_URL_TEMPLATE (int): Cannot set both destination url and tracking url template.
FINAL_URLS_NOT_SUPPORTED_FOR_CRITERION_TYPE (int): Final urls are not supported for this criterion type.
FINAL_MOBILE_URLS_NOT_SUPPORTED_FOR_CRITERION_TYPE (int): Final mobile urls are not supported for this criterion type.
INVALID_LISTING_GROUP_HIERARCHY (int): Ad group is invalid due to the listing groups it contains.
LISTING_GROUP_UNIT_CANNOT_HAVE_CHILDREN (int): Listing group unit cannot have children.
LISTING_GROUP_SUBDIVISION_REQUIRES_OTHERS_CASE (int): Subdivided listing groups must have an "others" case.
LISTING_GROUP_REQUIRES_SAME_DIMENSION_TYPE_AS_SIBLINGS (int): Dimension type of listing group must be the same as that of its siblings.
LISTING_GROUP_ALREADY_EXISTS (int): Listing group cannot be added to the ad group because it already exists.
LISTING_GROUP_DOES_NOT_EXIST (int): Listing group referenced in the operation was not found in the ad group.
LISTING_GROUP_CANNOT_BE_REMOVED (int): Recursive removal failed because listing group subdivision is being
created or modified in this request.
INVALID_LISTING_GROUP_TYPE (int): Listing group type is not allowed for specified ad group criterion type.
LISTING_GROUP_ADD_MAY_ONLY_USE_TEMP_ID (int): Listing group in an ADD operation specifies a non temporary criterion id.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_CRITERION_LABEL_DOES_NOT_EXIST = 2
AD_GROUP_CRITERION_LABEL_ALREADY_EXISTS = 3
CANNOT_ADD_LABEL_TO_NEGATIVE_CRITERION = 4
TOO_MANY_OPERATIONS = 5
CANT_UPDATE_NEGATIVE = 6
CONCRETE_TYPE_REQUIRED = 7
BID_INCOMPATIBLE_WITH_ADGROUP = 8
CANNOT_TARGET_AND_EXCLUDE = 9
ILLEGAL_URL = 10
INVALID_KEYWORD_TEXT = 11
INVALID_DESTINATION_URL = 12
MISSING_DESTINATION_URL_TAG = 13
KEYWORD_LEVEL_BID_NOT_SUPPORTED_FOR_MANUALCPM = 14
INVALID_USER_STATUS = 15
CANNOT_ADD_CRITERIA_TYPE = 16
CANNOT_EXCLUDE_CRITERIA_TYPE = 17
CAMPAIGN_TYPE_NOT_COMPATIBLE_WITH_PARTIAL_FAILURE = 27
OPERATIONS_FOR_TOO_MANY_SHOPPING_ADGROUPS = 28
CANNOT_MODIFY_URL_FIELDS_WITH_DUPLICATE_ELEMENTS = 29
CANNOT_SET_WITHOUT_FINAL_URLS = 30
CANNOT_CLEAR_FINAL_URLS_IF_FINAL_MOBILE_URLS_EXIST = 31
CANNOT_CLEAR_FINAL_URLS_IF_FINAL_APP_URLS_EXIST = 32
CANNOT_CLEAR_FINAL_URLS_IF_TRACKING_URL_TEMPLATE_EXISTS = 33
CANNOT_CLEAR_FINAL_URLS_IF_URL_CUSTOM_PARAMETERS_EXIST = 34
CANNOT_SET_BOTH_DESTINATION_URL_AND_FINAL_URLS = 35
CANNOT_SET_BOTH_DESTINATION_URL_AND_TRACKING_URL_TEMPLATE = 36
FINAL_URLS_NOT_SUPPORTED_FOR_CRITERION_TYPE = 37
FINAL_MOBILE_URLS_NOT_SUPPORTED_FOR_CRITERION_TYPE = 38
INVALID_LISTING_GROUP_HIERARCHY = 39
LISTING_GROUP_UNIT_CANNOT_HAVE_CHILDREN = 40
LISTING_GROUP_SUBDIVISION_REQUIRES_OTHERS_CASE = 41
LISTING_GROUP_REQUIRES_SAME_DIMENSION_TYPE_AS_SIBLINGS = 42
LISTING_GROUP_ALREADY_EXISTS = 43
LISTING_GROUP_DOES_NOT_EXIST = 44
LISTING_GROUP_CANNOT_BE_REMOVED = 45
INVALID_LISTING_GROUP_TYPE = 46
LISTING_GROUP_ADD_MAY_ONLY_USE_TEMP_ID = 47
'''
AdGroupCriterionErrorEnum = AdGroupCriterionErrorEnum() # For __getattribute__
class AdGroupCriterionStatusEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupCriterionStatus IntEnum source as a
# string; _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably
# materializes it on first access -- TODO confirm.
AdGroupCriterionStatusEnum = '''\
class AdGroupCriterionStatus(enum.IntEnum):
"""
The possible statuses of an AdGroupCriterion.
Attributes:
UNSPECIFIED (int): No value has been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
ENABLED (int): The ad group criterion is enabled.
PAUSED (int): The ad group criterion is paused.
REMOVED (int): The ad group criterion is removed.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
PAUSED = 3
REMOVED = 4
'''
AdGroupCriterionStatusEnum = AdGroupCriterionStatusEnum() # For __getattribute__
class AdGroupErrorEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupError IntEnum source as a string;
# _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably materializes
# it on first access -- TODO confirm. Value 4 is intentionally absent
# (3 -> 5), matching the upstream proto; do not renumber.
AdGroupErrorEnum = '''\
class AdGroupError(enum.IntEnum):
"""
Enum describing possible ad group errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
DUPLICATE_ADGROUP_NAME (int): AdGroup with the same name already exists for the campaign.
INVALID_ADGROUP_NAME (int): AdGroup name is not valid.
ADVERTISER_NOT_ON_CONTENT_NETWORK (int): Advertiser is not allowed to target sites or set site bids that are not
on the Google Search Network.
BID_TOO_BIG (int): Bid amount is too big.
BID_TYPE_AND_BIDDING_STRATEGY_MISMATCH (int): AdGroup bid does not match the campaign's bidding strategy.
MISSING_ADGROUP_NAME (int): AdGroup name is required for Add.
ADGROUP_LABEL_DOES_NOT_EXIST (int): No link found between the ad group and the label.
ADGROUP_LABEL_ALREADY_EXISTS (int): The label has already been attached to the ad group.
INVALID_CONTENT_BID_CRITERION_TYPE_GROUP (int): The CriterionTypeGroup is not supported for the content bid dimension.
AD_GROUP_TYPE_NOT_VALID_FOR_ADVERTISING_CHANNEL_TYPE (int): The ad group type is not compatible with the campaign channel type.
ADGROUP_TYPE_NOT_SUPPORTED_FOR_CAMPAIGN_SALES_COUNTRY (int): The ad group type is not supported in the country of sale of the
campaign.
CANNOT_ADD_ADGROUP_OF_TYPE_DSA_TO_CAMPAIGN_WITHOUT_DSA_SETTING (int): Ad groups of AdGroupType.SEARCH\_DYNAMIC\_ADS can only be added to
campaigns that have DynamicSearchAdsSetting attached.
"""
UNSPECIFIED = 0
UNKNOWN = 1
DUPLICATE_ADGROUP_NAME = 2
INVALID_ADGROUP_NAME = 3
ADVERTISER_NOT_ON_CONTENT_NETWORK = 5
BID_TOO_BIG = 6
BID_TYPE_AND_BIDDING_STRATEGY_MISMATCH = 7
MISSING_ADGROUP_NAME = 8
ADGROUP_LABEL_DOES_NOT_EXIST = 9
ADGROUP_LABEL_ALREADY_EXISTS = 10
INVALID_CONTENT_BID_CRITERION_TYPE_GROUP = 11
AD_GROUP_TYPE_NOT_VALID_FOR_ADVERTISING_CHANNEL_TYPE = 12
ADGROUP_TYPE_NOT_SUPPORTED_FOR_CAMPAIGN_SALES_COUNTRY = 13
CANNOT_ADD_ADGROUP_OF_TYPE_DSA_TO_CAMPAIGN_WITHOUT_DSA_SETTING = 14
'''
AdGroupErrorEnum = AdGroupErrorEnum() # For __getattribute__
class AdGroupFeedErrorEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupFeedError IntEnum source as a
# string; _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably
# materializes it on first access -- TODO confirm.
AdGroupFeedErrorEnum = '''\
class AdGroupFeedError(enum.IntEnum):
"""
Enum describing possible ad group feed errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE (int): An active feed already exists for this ad group and place holder type.
CANNOT_CREATE_FOR_REMOVED_FEED (int): The specified feed is removed.
ADGROUP_FEED_ALREADY_EXISTS (int): The AdGroupFeed already exists. UPDATE operation should be used to modify
the existing AdGroupFeed.
CANNOT_OPERATE_ON_REMOVED_ADGROUP_FEED (int): Cannot operate on removed AdGroupFeed.
INVALID_PLACEHOLDER_TYPE (int): Invalid placeholder type.
MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE (int): Feed mapping for this placeholder type does not exist.
NO_EXISTING_LOCATION_CUSTOMER_FEED (int): Location AdGroupFeeds cannot be created unless there is a location
CustomerFeed for the specified feed.
"""
UNSPECIFIED = 0
UNKNOWN = 1
FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2
CANNOT_CREATE_FOR_REMOVED_FEED = 3
ADGROUP_FEED_ALREADY_EXISTS = 4
CANNOT_OPERATE_ON_REMOVED_ADGROUP_FEED = 5
INVALID_PLACEHOLDER_TYPE = 6
MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 7
NO_EXISTING_LOCATION_CUSTOMER_FEED = 8
'''
AdGroupFeedErrorEnum = AdGroupFeedErrorEnum() # For __getattribute__
class AdGroupStatusEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupStatus IntEnum source as a string;
# _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably materializes
# it on first access -- TODO confirm.
AdGroupStatusEnum = '''\
class AdGroupStatus(enum.IntEnum):
"""
The possible statuses of an ad group.
Attributes:
UNSPECIFIED (int): The status has not been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
ENABLED (int): The ad group is enabled.
PAUSED (int): The ad group is paused.
REMOVED (int): The ad group is removed.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
PAUSED = 3
REMOVED = 4
'''
AdGroupStatusEnum = AdGroupStatusEnum() # For __getattribute__
class AdGroupTypeEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdGroupType IntEnum source as a string;
# _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably materializes
# it on first access -- TODO confirm. Value 5 is intentionally absent
# (4 -> 6), matching the upstream proto; do not renumber.
AdGroupTypeEnum = '''\
class AdGroupType(enum.IntEnum):
"""
Enum listing the possible types of an ad group.
Attributes:
UNSPECIFIED (int): The type has not been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
SEARCH_STANDARD (int): The default ad group type for Search campaigns.
DISPLAY_STANDARD (int): The default ad group type for Display campaigns.
SHOPPING_PRODUCT_ADS (int): The ad group type for Shopping campaigns serving standard product ads.
HOTEL_ADS (int): The default ad group type for Hotel campaigns.
SHOPPING_SMART_ADS (int): The type for ad groups in Smart Shopping campaigns.
VIDEO_BUMPER (int): Short unskippable in-stream video ads.
VIDEO_TRUE_VIEW_IN_STREAM (int): TrueView (skippable) in-stream video ads.
VIDEO_TRUE_VIEW_IN_DISPLAY (int): TrueView in-display video ads.
VIDEO_NON_SKIPPABLE_IN_STREAM (int): Unskippable in-stream video ads.
VIDEO_OUTSTREAM (int): Outstream video ads.
SEARCH_DYNAMIC_ADS (int): Ad group type for Dynamic Search Ads ad groups.
SHOPPING_COMPARISON_LISTING_ADS (int): The type for ad groups in Shopping Comparison Listing campaigns.
PROMOTED_HOTEL_ADS (int): The ad group type for Promoted Hotel ad groups.
"""
UNSPECIFIED = 0
UNKNOWN = 1
SEARCH_STANDARD = 2
DISPLAY_STANDARD = 3
SHOPPING_PRODUCT_ADS = 4
HOTEL_ADS = 6
SHOPPING_SMART_ADS = 7
VIDEO_BUMPER = 8
VIDEO_TRUE_VIEW_IN_STREAM = 9
VIDEO_TRUE_VIEW_IN_DISPLAY = 10
VIDEO_NON_SKIPPABLE_IN_STREAM = 11
VIDEO_OUTSTREAM = 12
SEARCH_DYNAMIC_ADS = 13
SHOPPING_COMPARISON_LISTING_ADS = 14
PROMOTED_HOTEL_ADS = 15
'''
AdGroupTypeEnum = AdGroupTypeEnum() # For __getattribute__
class AdNetworkTypeEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdNetworkType IntEnum source as a string;
# _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably materializes
# it on first access -- TODO confirm.
AdNetworkTypeEnum = '''\
class AdNetworkType(enum.IntEnum):
"""
Enumerates Google Ads network types.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): The value is unknown in this version.
SEARCH (int): Google search.
SEARCH_PARTNERS (int): Search partners.
CONTENT (int): Display Network.
YOUTUBE_SEARCH (int): YouTube Search.
YOUTUBE_WATCH (int): YouTube Videos
MIXED (int): Cross-network.
"""
UNSPECIFIED = 0
UNKNOWN = 1
SEARCH = 2
SEARCH_PARTNERS = 3
CONTENT = 4
YOUTUBE_SEARCH = 5
YOUTUBE_WATCH = 6
MIXED = 7
'''
AdNetworkTypeEnum = AdNetworkTypeEnum() # For __getattribute__
class AdParameterErrorEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdParameterError IntEnum source as a
# string; _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably
# materializes it on first access -- TODO confirm.
AdParameterErrorEnum = '''\
class AdParameterError(enum.IntEnum):
"""
Enum describing possible ad parameter errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
AD_GROUP_CRITERION_MUST_BE_KEYWORD (int): The ad group criterion must be a keyword criterion.
INVALID_INSERTION_TEXT_FORMAT (int): The insertion text is invalid.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_CRITERION_MUST_BE_KEYWORD = 2
INVALID_INSERTION_TEXT_FORMAT = 3
'''
AdParameterErrorEnum = AdParameterErrorEnum() # For __getattribute__
class AdServingOptimizationStatusEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdServingOptimizationStatus IntEnum source
# as a string; _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably
# materializes it on first access -- TODO confirm.
AdServingOptimizationStatusEnum = '''\
class AdServingOptimizationStatus(enum.IntEnum):
"""
Enum describing possible serving statuses.
Attributes:
UNSPECIFIED (int): No value has been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
OPTIMIZE (int): Ad serving is optimized based on CTR for the campaign.
CONVERSION_OPTIMIZE (int): Ad serving is optimized based on CTR \* Conversion for the campaign. If
the campaign is not in the conversion optimizer bidding strategy, it
will default to OPTIMIZED.
ROTATE (int): Ads are rotated evenly for 90 days, then optimized for clicks.
ROTATE_INDEFINITELY (int): Show lower performing ads more evenly with higher performing ads, and do
not optimize.
UNAVAILABLE (int): Ad serving optimization status is not available.
"""
UNSPECIFIED = 0
UNKNOWN = 1
OPTIMIZE = 2
CONVERSION_OPTIMIZE = 3
ROTATE = 4
ROTATE_INDEFINITELY = 5
UNAVAILABLE = 6
'''
AdServingOptimizationStatusEnum = AdServingOptimizationStatusEnum() # For __getattribute__
class AdSharingErrorEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdSharingError IntEnum source as a string;
# _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably materializes
# it on first access -- TODO confirm.
AdSharingErrorEnum = '''\
class AdSharingError(enum.IntEnum):
"""
Enum describing possible ad sharing errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
AD_GROUP_ALREADY_CONTAINS_AD (int): Error resulting in attempting to add an Ad to an AdGroup that already
contains the Ad.
INCOMPATIBLE_AD_UNDER_AD_GROUP (int): Ad is not compatible with the AdGroup it is being shared with.
CANNOT_SHARE_INACTIVE_AD (int): Cannot add AdGroupAd on inactive Ad.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_ALREADY_CONTAINS_AD = 2
INCOMPATIBLE_AD_UNDER_AD_GROUP = 3
CANNOT_SHARE_INACTIVE_AD = 4
'''
AdSharingErrorEnum = AdSharingErrorEnum() # For __getattribute__
class AdStrengthEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdStrength IntEnum source as a string;
# _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably materializes
# it on first access -- TODO confirm.
AdStrengthEnum = '''\
class AdStrength(enum.IntEnum):
"""
Enum listing the possible ad strengths.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
PENDING (int): The ad strength is currently pending.
NO_ADS (int): No ads could be generated.
POOR (int): Poor strength.
AVERAGE (int): Average strength.
GOOD (int): Good strength.
EXCELLENT (int): Excellent strength.
"""
UNSPECIFIED = 0
UNKNOWN = 1
PENDING = 2
NO_ADS = 3
POOR = 4
AVERAGE = 5
GOOD = 6
EXCELLENT = 7
'''
AdStrengthEnum = AdStrengthEnum() # For __getattribute__
class AdTypeEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdType IntEnum source as a string;
# _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably materializes
# it on first access -- TODO confirm. Numbering gaps (3 -> 6, 10 -> 12,
# 19 -> 21) mirror the upstream proto's reserved values; do not renumber.
AdTypeEnum = '''\
class AdType(enum.IntEnum):
"""
The possible types of an ad.
Attributes:
UNSPECIFIED (int): No value has been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
TEXT_AD (int): The ad is a text ad.
EXPANDED_TEXT_AD (int): The ad is an expanded text ad.
CALL_ONLY_AD (int): The ad is a call only ad.
EXPANDED_DYNAMIC_SEARCH_AD (int): The ad is an expanded dynamic search ad.
HOTEL_AD (int): The ad is a hotel ad.
SHOPPING_SMART_AD (int): The ad is a Smart Shopping ad.
SHOPPING_PRODUCT_AD (int): The ad is a standard Shopping ad.
VIDEO_AD (int): The ad is a video ad.
GMAIL_AD (int): This ad is a Gmail ad.
IMAGE_AD (int): This ad is an Image ad.
RESPONSIVE_SEARCH_AD (int): The ad is a responsive search ad.
LEGACY_RESPONSIVE_DISPLAY_AD (int): The ad is a legacy responsive display ad.
APP_AD (int): The ad is an app ad.
LEGACY_APP_INSTALL_AD (int): The ad is a legacy app install ad.
RESPONSIVE_DISPLAY_AD (int): The ad is a responsive display ad.
HTML5_UPLOAD_AD (int): The ad is a display upload ad with the HTML5\_UPLOAD\_AD product type.
DYNAMIC_HTML5_AD (int): The ad is a display upload ad with one of the DYNAMIC\_HTML5\_\* product
types.
APP_ENGAGEMENT_AD (int): The ad is an app engagement ad.
SHOPPING_COMPARISON_LISTING_AD (int): The ad is a Shopping Comparison Listing ad.
"""
UNSPECIFIED = 0
UNKNOWN = 1
TEXT_AD = 2
EXPANDED_TEXT_AD = 3
CALL_ONLY_AD = 6
EXPANDED_DYNAMIC_SEARCH_AD = 7
HOTEL_AD = 8
SHOPPING_SMART_AD = 9
SHOPPING_PRODUCT_AD = 10
VIDEO_AD = 12
GMAIL_AD = 13
IMAGE_AD = 14
RESPONSIVE_SEARCH_AD = 15
LEGACY_RESPONSIVE_DISPLAY_AD = 16
APP_AD = 17
LEGACY_APP_INSTALL_AD = 18
RESPONSIVE_DISPLAY_AD = 19
HTML5_UPLOAD_AD = 21
DYNAMIC_HTML5_AD = 22
APP_ENGAGEMENT_AD = 23
SHOPPING_COMPARISON_LISTING_AD = 24
'''
AdTypeEnum = AdTypeEnum() # For __getattribute__
class AdvertisingChannelSubTypeEnum(_CreateEnumTypeUponFirstAccess):
# Lazy enum container: stores the AdvertisingChannelSubType IntEnum source
# as a string; _CreateEnumTypeUponFirstAccess (defined elsewhere) presumably
# materializes it on first access -- TODO confirm. Value 14 is intentionally
# absent (13 -> 15), matching the upstream proto; do not renumber.
AdvertisingChannelSubTypeEnum = '''\
class AdvertisingChannelSubType(enum.IntEnum):
"""
Enum describing the different channel subtypes.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used as a return value only. Represents value unknown in this version.
SEARCH_MOBILE_APP (int): Mobile app campaigns for Search.
DISPLAY_MOBILE_APP (int): Mobile app campaigns for Display.
SEARCH_EXPRESS (int): AdWords express campaigns for search.
DISPLAY_EXPRESS (int): AdWords Express campaigns for display.
SHOPPING_SMART_ADS (int): Smart Shopping campaigns.
DISPLAY_GMAIL_AD (int): Gmail Ad campaigns.
DISPLAY_SMART_CAMPAIGN (int): Smart display campaigns.
VIDEO_OUTSTREAM (int): Video Outstream campaigns.
VIDEO_ACTION (int): Video TrueView for Action campaigns.
VIDEO_NON_SKIPPABLE (int): Video campaigns with non-skippable video ads.
APP_CAMPAIGN (int): App Campaign that allows you to easily promote your Android or iOS app
across Google's top properties including Search, Play, YouTube, and the
Google Display Network.
APP_CAMPAIGN_FOR_ENGAGEMENT (int): App Campaign for engagement, focused on driving re-engagement with the
app across several of Google’s top properties including Search, YouTube,
and the Google Display Network.
SHOPPING_COMPARISON_LISTING_ADS (int): Shopping Comparison Listing campaigns.
"""
UNSPECIFIED = 0
UNKNOWN = 1
SEARCH_MOBILE_APP = 2
DISPLAY_MOBILE_APP = 3
SEARCH_EXPRESS = 4
DISPLAY_EXPRESS = 5
SHOPPING_SMART_ADS = 6
DISPLAY_GMAIL_AD = 7
DISPLAY_SMART_CAMPAIGN = 8
VIDEO_OUTSTREAM = 9
VIDEO_ACTION = 10
VIDEO_NON_SKIPPABLE = 11
APP_CAMPAIGN = 12
APP_CAMPAIGN_FOR_ENGAGEMENT = 13
SHOPPING_COMPARISON_LISTING_ADS = 15
'''
AdvertisingChannelSubTypeEnum = AdvertisingChannelSubTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AdvertisingChannelTypeEnum(_CreateEnumTypeUponFirstAccess):
    AdvertisingChannelTypeEnum = '''\
class AdvertisingChannelType(enum.IntEnum):
    """
    Enum describing the various advertising channel types.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        SEARCH (int): Search Network. Includes display bundled, and Search+ campaigns.
        DISPLAY (int): Google Display Network only.
        SHOPPING (int): Shopping campaigns serve on the shopping property
            and on google.com search results.
        HOTEL (int): Hotel Ads campaigns.
        VIDEO (int): Video campaigns.
        MULTI_CHANNEL (int): App Campaigns, and App Campaigns for Engagement, that run
            across multiple channels.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    SEARCH = 2
    DISPLAY = 3
    SHOPPING = 4
    HOTEL = 5
    VIDEO = 6
    MULTI_CHANNEL = 7
'''
AdvertisingChannelTypeEnum = AdvertisingChannelTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AdxErrorEnum(_CreateEnumTypeUponFirstAccess):
    AdxErrorEnum = '''\
class AdxError(enum.IntEnum):
    """
    Enum describing possible adx errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        UNSUPPORTED_FEATURE (int): Attempt to use non-AdX feature by AdX customer.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    UNSUPPORTED_FEATURE = 2
'''
AdxErrorEnum = AdxErrorEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AffiliateLocationFeedRelationshipTypeEnum(_CreateEnumTypeUponFirstAccess):
    AffiliateLocationFeedRelationshipTypeEnum = '''\
class AffiliateLocationFeedRelationshipType(enum.IntEnum):
    """
    Possible values for a relationship type for an affiliate location feed.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        GENERAL_RETAILER (int): General retailer relationship.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    GENERAL_RETAILER = 2
'''
AffiliateLocationFeedRelationshipTypeEnum = AffiliateLocationFeedRelationshipTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AffiliateLocationPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    AffiliateLocationPlaceholderFieldEnum = '''\
class AffiliateLocationPlaceholderField(enum.IntEnum):
    """
    Possible values for Affiliate Location placeholder fields.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        BUSINESS_NAME (int): Data Type: STRING. The name of the business.
        ADDRESS_LINE_1 (int): Data Type: STRING. Line 1 of the business address.
        ADDRESS_LINE_2 (int): Data Type: STRING. Line 2 of the business address.
        CITY (int): Data Type: STRING. City of the business address.
        PROVINCE (int): Data Type: STRING. Province of the business address.
        POSTAL_CODE (int): Data Type: STRING. Postal code of the business address.
        COUNTRY_CODE (int): Data Type: STRING. Country code of the business address.
        PHONE_NUMBER (int): Data Type: STRING. Phone number of the business.
        LANGUAGE_CODE (int): Data Type: STRING. Language code of the business.
        CHAIN_ID (int): Data Type: INT64. ID of the chain.
        CHAIN_NAME (int): Data Type: STRING. Name of the chain.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BUSINESS_NAME = 2
    ADDRESS_LINE_1 = 3
    ADDRESS_LINE_2 = 4
    CITY = 5
    PROVINCE = 6
    POSTAL_CODE = 7
    COUNTRY_CODE = 8
    PHONE_NUMBER = 9
    LANGUAGE_CODE = 10
    CHAIN_ID = 11
    CHAIN_NAME = 12
'''
AffiliateLocationPlaceholderFieldEnum = AffiliateLocationPlaceholderFieldEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
# NOTE(review): member values are sparse criterion IDs (503001..503999), not
# sequential ordinals -- they must not be renumbered.
class AgeRangeTypeEnum(_CreateEnumTypeUponFirstAccess):
    AgeRangeTypeEnum = '''\
class AgeRangeType(enum.IntEnum):
    """
    The type of demographic age ranges (e.g. between 18 and 24 years old).
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        AGE_RANGE_18_24 (int): Between 18 and 24 years old.
        AGE_RANGE_25_34 (int): Between 25 and 34 years old.
        AGE_RANGE_35_44 (int): Between 35 and 44 years old.
        AGE_RANGE_45_54 (int): Between 45 and 54 years old.
        AGE_RANGE_55_64 (int): Between 55 and 64 years old.
        AGE_RANGE_65_UP (int): 65 years old and beyond.
        AGE_RANGE_UNDETERMINED (int): Undetermined age range.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    AGE_RANGE_18_24 = 503001
    AGE_RANGE_25_34 = 503002
    AGE_RANGE_35_44 = 503003
    AGE_RANGE_45_54 = 503004
    AGE_RANGE_55_64 = 503005
    AGE_RANGE_65_UP = 503006
    AGE_RANGE_UNDETERMINED = 503999
'''
AgeRangeTypeEnum = AgeRangeTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AppCampaignAppStoreEnum(_CreateEnumTypeUponFirstAccess):
    AppCampaignAppStoreEnum = '''\
class AppCampaignAppStore(enum.IntEnum):
    """
    Enum describing app campaign app store.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        APPLE_APP_STORE (int): Apple app store.
        GOOGLE_APP_STORE (int): Google play.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    APPLE_APP_STORE = 2
    GOOGLE_APP_STORE = 3
'''
AppCampaignAppStoreEnum = AppCampaignAppStoreEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AppCampaignBiddingStrategyGoalTypeEnum(_CreateEnumTypeUponFirstAccess):
    AppCampaignBiddingStrategyGoalTypeEnum = '''\
class AppCampaignBiddingStrategyGoalType(enum.IntEnum):
    """
    Goal type of App campaign BiddingStrategy.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        OPTIMIZE_INSTALLS_TARGET_INSTALL_COST (int): Aim to maximize the number of app installs. The cpa bid is the
            target cost per install.
        OPTIMIZE_IN_APP_CONVERSIONS_TARGET_INSTALL_COST (int): Aim to maximize the long term number of selected in-app conversions from
            app installs. The cpa bid is the target cost per install.
        OPTIMIZE_IN_APP_CONVERSIONS_TARGET_CONVERSION_COST (int): Aim to maximize the long term number of selected in-app conversions from
            app installs. The cpa bid is the target cost per in-app conversion. Note
            that the actual cpa may seem higher than the target cpa at first, since
            the long term conversions haven’t happened yet.
        OPTIMIZE_RETURN_ON_ADVERTISING_SPEND (int): Aim to maximize all conversions' value, i.e. install + selected in-app
            conversions while achieving or exceeding target return on advertising
            spend.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    OPTIMIZE_INSTALLS_TARGET_INSTALL_COST = 2
    OPTIMIZE_IN_APP_CONVERSIONS_TARGET_INSTALL_COST = 3
    OPTIMIZE_IN_APP_CONVERSIONS_TARGET_CONVERSION_COST = 4
    OPTIMIZE_RETURN_ON_ADVERTISING_SPEND = 5
'''
AppCampaignBiddingStrategyGoalTypeEnum = AppCampaignBiddingStrategyGoalTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
# NOTE(review): PAID = 30 is a sparse server-side criterion ID, not an ordinal.
class AppPaymentModelTypeEnum(_CreateEnumTypeUponFirstAccess):
    AppPaymentModelTypeEnum = '''\
class AppPaymentModelType(enum.IntEnum):
    """
    Enum describing possible app payment models.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        PAID (int): Represents paid-for apps.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PAID = 30
'''
AppPaymentModelTypeEnum = AppPaymentModelTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AppPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    AppPlaceholderFieldEnum = '''\
class AppPlaceholderField(enum.IntEnum):
    """
    Possible values for App placeholder fields.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        STORE (int): Data Type: INT64. The application store that the target application
            belongs to. Valid values are: 1 = Apple iTunes Store; 2 = Google Play
            Store.
        ID (int): Data Type: STRING. The store-specific ID for the target application.
        LINK_TEXT (int): Data Type: STRING. The visible text displayed when the link is rendered
            in an ad.
        URL (int): Data Type: STRING. The destination URL of the in-app link.
        FINAL_URLS (int): Data Type: URL\_LIST. Final URLs for the in-app link when using Upgraded
            URLs.
        FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final Mobile URLs for the in-app link when using
            Upgraded URLs.
        TRACKING_URL (int): Data Type: URL. Tracking template for the in-app link when using Upgraded
            URLs.
        FINAL_URL_SUFFIX (int): Data Type: STRING. Final URL suffix for the in-app link when using
            parallel tracking.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    STORE = 2
    ID = 3
    LINK_TEXT = 4
    URL = 5
    FINAL_URLS = 6
    FINAL_MOBILE_URLS = 7
    TRACKING_URL = 8
    FINAL_URL_SUFFIX = 9
'''
AppPlaceholderFieldEnum = AppPlaceholderFieldEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AppStoreEnum(_CreateEnumTypeUponFirstAccess):
    AppStoreEnum = '''\
class AppStore(enum.IntEnum):
    """
    App store type in an app extension.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        APPLE_ITUNES (int): Apple iTunes.
        GOOGLE_PLAY (int): Google Play.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    APPLE_ITUNES = 2
    GOOGLE_PLAY = 3
'''
AppStoreEnum = AppStoreEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AppUrlOperatingSystemTypeEnum(_CreateEnumTypeUponFirstAccess):
    AppUrlOperatingSystemTypeEnum = '''\
class AppUrlOperatingSystemType(enum.IntEnum):
    """
    Operating System
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        IOS (int): The Apple IOS operating system.
        ANDROID (int): The Android operating system.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    IOS = 2
    ANDROID = 3
'''
AppUrlOperatingSystemTypeEnum = AppUrlOperatingSystemTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AssetErrorEnum(_CreateEnumTypeUponFirstAccess):
    AssetErrorEnum = '''\
class AssetError(enum.IntEnum):
    """
    Enum describing possible asset errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        CUSTOMER_NOT_WHITELISTED_FOR_ASSET_TYPE (int): The customer is not whitelisted for this asset type.
        DUPLICATE_ASSET (int): Assets are duplicated across operations.
        DUPLICATE_ASSET_NAME (int): The asset name is duplicated, either across operations or with an
            existing asset.
        ASSET_DATA_IS_MISSING (int): The Asset.asset\_data oneof is empty.
        CANNOT_MODIFY_ASSET_NAME (int): The asset has a name which is different from an existing duplicate that
            represents the same content.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CUSTOMER_NOT_WHITELISTED_FOR_ASSET_TYPE = 2
    DUPLICATE_ASSET = 3
    DUPLICATE_ASSET_NAME = 4
    ASSET_DATA_IS_MISSING = 5
    CANNOT_MODIFY_ASSET_NAME = 6
'''
AssetErrorEnum = AssetErrorEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AssetFieldTypeEnum(_CreateEnumTypeUponFirstAccess):
    AssetFieldTypeEnum = '''\
class AssetFieldType(enum.IntEnum):
    """
    Enum describing the possible placements of an asset.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        HEADLINE (int): The asset is linked for use as a headline.
        DESCRIPTION (int): The asset is linked for use as a description.
        MANDATORY_AD_TEXT (int): The asset is linked for use as mandatory ad text.
        MARKETING_IMAGE (int): The asset is linked for use as a marketing image.
        MEDIA_BUNDLE (int): The asset is linked for use as a media bundle.
        YOUTUBE_VIDEO (int): The asset is linked for use as a YouTube video.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    HEADLINE = 2
    DESCRIPTION = 3
    MANDATORY_AD_TEXT = 4
    MARKETING_IMAGE = 5
    MEDIA_BUNDLE = 6
    YOUTUBE_VIDEO = 7
'''
AssetFieldTypeEnum = AssetFieldTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AssetPerformanceLabelEnum(_CreateEnumTypeUponFirstAccess):
    AssetPerformanceLabelEnum = '''\
class AssetPerformanceLabel(enum.IntEnum):
    """
    Enum describing the possible performance labels of an asset, usually
    computed in the context of a linkage.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        PENDING (int): This asset does not yet have any performance informantion. This may be
            because it is still under review.
        LEARNING (int): The asset has started getting impressions but the stats are not
            statistically significant enough to get an asset performance label.
        LOW (int): Worst performing assets.
        GOOD (int): Good performing assets.
        BEST (int): Best performing assets.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PENDING = 2
    LEARNING = 3
    LOW = 4
    GOOD = 5
    BEST = 6
'''
AssetPerformanceLabelEnum = AssetPerformanceLabelEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AssetTypeEnum(_CreateEnumTypeUponFirstAccess):
    AssetTypeEnum = '''\
class AssetType(enum.IntEnum):
    """
    Enum describing possible types of asset.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        YOUTUBE_VIDEO (int): YouTube video asset.
        MEDIA_BUNDLE (int): Media bundle asset.
        IMAGE (int): Image asset.
        TEXT (int): Text asset.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    YOUTUBE_VIDEO = 2
    MEDIA_BUNDLE = 3
    IMAGE = 4
    TEXT = 5
'''
AssetTypeEnum = AssetTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
# NOTE(review): member values 100..106 are sparse server-assigned codes, not ordinals.
class AttributionModelEnum(_CreateEnumTypeUponFirstAccess):
    AttributionModelEnum = '''\
class AttributionModel(enum.IntEnum):
    """
    The attribution model that describes how to distribute credit for a
    particular conversion across potentially many prior interactions.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        EXTERNAL (int): Uses external attribution.
        GOOGLE_ADS_LAST_CLICK (int): Attributes all credit for a conversion to its last click.
        GOOGLE_SEARCH_ATTRIBUTION_FIRST_CLICK (int): Attributes all credit for a conversion to its first click using Google
            Search attribution.
        GOOGLE_SEARCH_ATTRIBUTION_LINEAR (int): Attributes credit for a conversion equally across all of its clicks using
            Google Search attribution.
        GOOGLE_SEARCH_ATTRIBUTION_TIME_DECAY (int): Attributes exponentially more credit for a conversion to its more recent
            clicks using Google Search attribution (half-life is 1 week).
        GOOGLE_SEARCH_ATTRIBUTION_POSITION_BASED (int): Attributes 40% of the credit for a conversion to its first and last
            clicks. Remaining 20% is evenly distributed across all other clicks. This
            uses Google Search attribution.
        GOOGLE_SEARCH_ATTRIBUTION_DATA_DRIVEN (int): Flexible model that uses machine learning to determine the appropriate
            distribution of credit among clicks using Google Search attribution.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    EXTERNAL = 100
    GOOGLE_ADS_LAST_CLICK = 101
    GOOGLE_SEARCH_ATTRIBUTION_FIRST_CLICK = 102
    GOOGLE_SEARCH_ATTRIBUTION_LINEAR = 103
    GOOGLE_SEARCH_ATTRIBUTION_TIME_DECAY = 104
    GOOGLE_SEARCH_ATTRIBUTION_POSITION_BASED = 105
    GOOGLE_SEARCH_ATTRIBUTION_DATA_DRIVEN = 106
'''
AttributionModelEnum = AttributionModelEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
# NOTE(review): values are sparse (gaps at 3-4, 6-7, 11, 21) -- server error codes,
# not ordinals; do not renumber.
class AuthenticationErrorEnum(_CreateEnumTypeUponFirstAccess):
    AuthenticationErrorEnum = '''\
class AuthenticationError(enum.IntEnum):
    """
    Enum describing possible authentication errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        AUTHENTICATION_ERROR (int): Authentication of the request failed.
        CLIENT_CUSTOMER_ID_INVALID (int): Client Customer Id is not a number.
        CUSTOMER_NOT_FOUND (int): No customer found for the provided customer id.
        GOOGLE_ACCOUNT_DELETED (int): Client's Google Account is deleted.
        GOOGLE_ACCOUNT_COOKIE_INVALID (int): Google account login token in the cookie is invalid.
        GOOGLE_ACCOUNT_AUTHENTICATION_FAILED (int): A problem occurred during Google account authentication.
        GOOGLE_ACCOUNT_USER_AND_ADS_USER_MISMATCH (int): The user in the google account login token does not match the UserId in
            the cookie.
        LOGIN_COOKIE_REQUIRED (int): Login cookie is required for authentication.
        NOT_ADS_USER (int): User in the cookie is not a valid Ads user.
        OAUTH_TOKEN_INVALID (int): Oauth token in the header is not valid.
        OAUTH_TOKEN_EXPIRED (int): Oauth token in the header has expired.
        OAUTH_TOKEN_DISABLED (int): Oauth token in the header has been disabled.
        OAUTH_TOKEN_REVOKED (int): Oauth token in the header has been revoked.
        OAUTH_TOKEN_HEADER_INVALID (int): Oauth token HTTP header is malformed.
        LOGIN_COOKIE_INVALID (int): Login cookie is not valid.
        USER_ID_INVALID (int): User Id in the header is not a valid id.
        TWO_STEP_VERIFICATION_NOT_ENROLLED (int): An account administrator changed this account's authentication settings.
            To access this Google Ads account, enable 2-Step Verification in your
            Google account at https://www.google.com/landing/2step.
        ADVANCED_PROTECTION_NOT_ENROLLED (int): An account administrator changed this account's authentication settings.
            To access this Google Ads account, enable Advanced Protection in your
            Google account at https://landing.google.com/advancedprotection.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    AUTHENTICATION_ERROR = 2
    CLIENT_CUSTOMER_ID_INVALID = 5
    CUSTOMER_NOT_FOUND = 8
    GOOGLE_ACCOUNT_DELETED = 9
    GOOGLE_ACCOUNT_COOKIE_INVALID = 10
    GOOGLE_ACCOUNT_AUTHENTICATION_FAILED = 25
    GOOGLE_ACCOUNT_USER_AND_ADS_USER_MISMATCH = 12
    LOGIN_COOKIE_REQUIRED = 13
    NOT_ADS_USER = 14
    OAUTH_TOKEN_INVALID = 15
    OAUTH_TOKEN_EXPIRED = 16
    OAUTH_TOKEN_DISABLED = 17
    OAUTH_TOKEN_REVOKED = 18
    OAUTH_TOKEN_HEADER_INVALID = 19
    LOGIN_COOKIE_INVALID = 20
    USER_ID_INVALID = 22
    TWO_STEP_VERIFICATION_NOT_ENROLLED = 23
    ADVANCED_PROTECTION_NOT_ENROLLED = 24
'''
AuthenticationErrorEnum = AuthenticationErrorEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class AuthorizationErrorEnum(_CreateEnumTypeUponFirstAccess):
    AuthorizationErrorEnum = '''\
class AuthorizationError(enum.IntEnum):
    """
    Enum describing possible authorization errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        USER_PERMISSION_DENIED (int): User doesn't have permission to access customer. Note: If you're
            accessing a client customer, the manager's customer ID must be set in
            the ``login-customer-id`` header. Learn more at
            https://developers.google.com/google-ads/api/docs/concepts/call-structure#cid
        DEVELOPER_TOKEN_NOT_WHITELISTED (int): The developer token is not whitelisted.
        DEVELOPER_TOKEN_PROHIBITED (int): The developer token is not allowed with the project sent in the request.
        PROJECT_DISABLED (int): The Google Cloud project sent in the request does not have permission to
            access the api.
        AUTHORIZATION_ERROR (int): Authorization of the client failed.
        ACTION_NOT_PERMITTED (int): The user does not have permission to perform this action
            (e.g., ADD, UPDATE, REMOVE) on the resource or call a method.
        INCOMPLETE_SIGNUP (int): Signup not complete.
        CUSTOMER_NOT_ENABLED (int): The customer can't be used because it isn't enabled.
        MISSING_TOS (int): The developer must sign the terms of service. They can be found here:
            ads.google.com/aw/apicenter
        DEVELOPER_TOKEN_NOT_APPROVED (int): The developer token is not approved. Non-approved developer tokens can
            only be used with test accounts.
        INVALID_LOGIN_CUSTOMER_ID_SERVING_CUSTOMER_ID_COMBINATION (int): The login customer specified does not have access to the account
            specified, so the request is invalid.
        SERVICE_ACCESS_DENIED (int): The developer specified does not have access to the service.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    USER_PERMISSION_DENIED = 2
    DEVELOPER_TOKEN_NOT_WHITELISTED = 3
    DEVELOPER_TOKEN_PROHIBITED = 4
    PROJECT_DISABLED = 5
    AUTHORIZATION_ERROR = 6
    ACTION_NOT_PERMITTED = 7
    INCOMPLETE_SIGNUP = 8
    CUSTOMER_NOT_ENABLED = 24
    MISSING_TOS = 9
    DEVELOPER_TOKEN_NOT_APPROVED = 10
    INVALID_LOGIN_CUSTOMER_ID_SERVING_CUSTOMER_ID_COMBINATION = 11
    SERVICE_ACCESS_DENIED = 12
'''
AuthorizationErrorEnum = AuthorizationErrorEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class BidModifierSourceEnum(_CreateEnumTypeUponFirstAccess):
    BidModifierSourceEnum = '''\
class BidModifierSource(enum.IntEnum):
    """
    Enum describing possible bid modifier sources.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        CAMPAIGN (int): The bid modifier is specified at the campaign level, on the campaign
            level criterion.
        AD_GROUP (int): The bid modifier is specified (overridden) at the ad group level.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CAMPAIGN = 2
    AD_GROUP = 3
'''
BidModifierSourceEnum = BidModifierSourceEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
# NOTE(review): values are sparse server error codes (gaps e.g. 3-6, 8-9) -- do not renumber.
class BiddingErrorEnum(_CreateEnumTypeUponFirstAccess):
    BiddingErrorEnum = '''\
class BiddingError(enum.IntEnum):
    """
    Enum describing possible bidding errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        BIDDING_STRATEGY_TRANSITION_NOT_ALLOWED (int): Cannot transition to new bidding strategy.
        CANNOT_ATTACH_BIDDING_STRATEGY_TO_CAMPAIGN (int): Cannot attach bidding strategy to campaign.
        INVALID_ANONYMOUS_BIDDING_STRATEGY_TYPE (int): Bidding strategy is not supported or cannot be used as anonymous.
        INVALID_BIDDING_STRATEGY_TYPE (int): The type does not match the named strategy's type.
        INVALID_BID (int): The bid is invalid.
        BIDDING_STRATEGY_NOT_AVAILABLE_FOR_ACCOUNT_TYPE (int): Bidding strategy is not available for the account type.
        CONVERSION_TRACKING_NOT_ENABLED (int): Conversion tracking is not enabled for the campaign for VBB transition.
        NOT_ENOUGH_CONVERSIONS (int): Not enough conversions tracked for VBB transitions.
        CANNOT_CREATE_CAMPAIGN_WITH_BIDDING_STRATEGY (int): Campaign can not be created with given bidding strategy. It can be
            transitioned to the strategy, once eligible.
        CANNOT_TARGET_CONTENT_NETWORK_ONLY_WITH_CAMPAIGN_LEVEL_POP_BIDDING_STRATEGY (int): Cannot target content network only as campaign uses Page One Promoted
            bidding strategy.
        BIDDING_STRATEGY_NOT_SUPPORTED_WITH_AD_SCHEDULE (int): Budget Optimizer and Target Spend bidding strategies are not supported
            for campaigns with AdSchedule targeting.
        PAY_PER_CONVERSION_NOT_AVAILABLE_FOR_CUSTOMER (int): Pay per conversion is not available to all the customer, only few
            whitelisted customers can use this.
        PAY_PER_CONVERSION_NOT_ALLOWED_WITH_TARGET_CPA (int): Pay per conversion is not allowed with Target CPA.
        BIDDING_STRATEGY_NOT_ALLOWED_FOR_SEARCH_ONLY_CAMPAIGNS (int): Cannot set bidding strategy to Manual CPM for search network only
            campaigns.
        BIDDING_STRATEGY_NOT_SUPPORTED_IN_DRAFTS_OR_EXPERIMENTS (int): The bidding strategy is not supported for use in drafts or experiments.
        BIDDING_STRATEGY_TYPE_DOES_NOT_SUPPORT_PRODUCT_TYPE_ADGROUP_CRITERION (int): Bidding strategy type does not support product type ad group criterion.
        BID_TOO_SMALL (int): Bid amount is too small.
        BID_TOO_BIG (int): Bid amount is too big.
        BID_TOO_MANY_FRACTIONAL_DIGITS (int): Bid has too many fractional digit precision.
        INVALID_DOMAIN_NAME (int): Invalid domain name specified.
        NOT_COMPATIBLE_WITH_PAYMENT_MODE (int): The field is not compatible with the payment mode.
        NOT_COMPATIBLE_WITH_BUDGET_TYPE (int): The field is not compatible with the budget type.
        NOT_COMPATIBLE_WITH_BIDDING_STRATEGY_TYPE (int): The field is not compatible with the bidding strategy type.
        BIDDING_STRATEGY_TYPE_INCOMPATIBLE_WITH_SHARED_BUDGET (int): Bidding strategy type is incompatible with shared budget.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BIDDING_STRATEGY_TRANSITION_NOT_ALLOWED = 2
    CANNOT_ATTACH_BIDDING_STRATEGY_TO_CAMPAIGN = 7
    INVALID_ANONYMOUS_BIDDING_STRATEGY_TYPE = 10
    INVALID_BIDDING_STRATEGY_TYPE = 14
    INVALID_BID = 17
    BIDDING_STRATEGY_NOT_AVAILABLE_FOR_ACCOUNT_TYPE = 18
    CONVERSION_TRACKING_NOT_ENABLED = 19
    NOT_ENOUGH_CONVERSIONS = 20
    CANNOT_CREATE_CAMPAIGN_WITH_BIDDING_STRATEGY = 21
    CANNOT_TARGET_CONTENT_NETWORK_ONLY_WITH_CAMPAIGN_LEVEL_POP_BIDDING_STRATEGY = 23
    BIDDING_STRATEGY_NOT_SUPPORTED_WITH_AD_SCHEDULE = 24
    PAY_PER_CONVERSION_NOT_AVAILABLE_FOR_CUSTOMER = 25
    PAY_PER_CONVERSION_NOT_ALLOWED_WITH_TARGET_CPA = 26
    BIDDING_STRATEGY_NOT_ALLOWED_FOR_SEARCH_ONLY_CAMPAIGNS = 27
    BIDDING_STRATEGY_NOT_SUPPORTED_IN_DRAFTS_OR_EXPERIMENTS = 28
    BIDDING_STRATEGY_TYPE_DOES_NOT_SUPPORT_PRODUCT_TYPE_ADGROUP_CRITERION = 29
    BID_TOO_SMALL = 30
    BID_TOO_BIG = 31
    BID_TOO_MANY_FRACTIONAL_DIGITS = 32
    INVALID_DOMAIN_NAME = 33
    NOT_COMPATIBLE_WITH_PAYMENT_MODE = 34
    NOT_COMPATIBLE_WITH_BUDGET_TYPE = 35
    NOT_COMPATIBLE_WITH_BIDDING_STRATEGY_TYPE = 36
    BIDDING_STRATEGY_TYPE_INCOMPATIBLE_WITH_SHARED_BUDGET = 37
'''
BiddingErrorEnum = BiddingErrorEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class BiddingSourceEnum(_CreateEnumTypeUponFirstAccess):
    BiddingSourceEnum = '''\
class BiddingSource(enum.IntEnum):
    """
    Indicates where a bid or target is defined. For example, an ad group
    criterion may define a cpc bid directly, or it can inherit its cpc bid from
    the ad group.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        CAMPAIGN_BIDDING_STRATEGY (int): Effective bid or target is inherited from campaign bidding strategy.
        AD_GROUP (int): The bid or target is defined on the ad group.
        AD_GROUP_CRITERION (int): The bid or target is defined on the ad group criterion.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CAMPAIGN_BIDDING_STRATEGY = 5
    AD_GROUP = 6
    AD_GROUP_CRITERION = 7
'''
BiddingSourceEnum = BiddingSourceEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class BiddingStrategyErrorEnum(_CreateEnumTypeUponFirstAccess):
    BiddingStrategyErrorEnum = '''\
class BiddingStrategyError(enum.IntEnum):
    """
    Enum describing possible bidding strategy errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        DUPLICATE_NAME (int): Each bidding strategy must have a unique name.
        CANNOT_CHANGE_BIDDING_STRATEGY_TYPE (int): Bidding strategy type is immutable.
        CANNOT_REMOVE_ASSOCIATED_STRATEGY (int): Only bidding strategies not linked to campaigns, adgroups or adgroup
            criteria can be removed.
        BIDDING_STRATEGY_NOT_SUPPORTED (int): The specified bidding strategy is not supported.
        INCOMPATIBLE_BIDDING_STRATEGY_AND_BIDDING_STRATEGY_GOAL_TYPE (int): The bidding strategy is incompatible with the campaign's bidding
            strategy goal type.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DUPLICATE_NAME = 2
    CANNOT_CHANGE_BIDDING_STRATEGY_TYPE = 3
    CANNOT_REMOVE_ASSOCIATED_STRATEGY = 4
    BIDDING_STRATEGY_NOT_SUPPORTED = 5
    INCOMPATIBLE_BIDDING_STRATEGY_AND_BIDDING_STRATEGY_GOAL_TYPE = 6
'''
BiddingStrategyErrorEnum = BiddingStrategyErrorEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
# NOTE(review): REMOVED = 4 (not 3) matches the upstream proto numbering; do not "fix".
class BiddingStrategyStatusEnum(_CreateEnumTypeUponFirstAccess):
    BiddingStrategyStatusEnum = '''\
class BiddingStrategyStatus(enum.IntEnum):
    """
    The possible statuses of a BiddingStrategy.
    Attributes:
        UNSPECIFIED (int): No value has been specified.
        UNKNOWN (int): The received value is not known in this version.
            This is a response-only value.
        ENABLED (int): The bidding strategy is enabled.
        REMOVED (int): The bidding strategy is removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 4
'''
BiddingStrategyStatusEnum = BiddingStrategyStatusEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
# NOTE(review): members are listed alphabetically while values reflect the order
# the strategies were added upstream -- values must not be resequenced.
class BiddingStrategyTypeEnum(_CreateEnumTypeUponFirstAccess):
    BiddingStrategyTypeEnum = '''\
class BiddingStrategyType(enum.IntEnum):
    """
    Enum describing possible bidding strategy types.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        COMMISSION (int): Commission is an automatic bidding strategy in which the advertiser pays
            a certain portion of the conversion value.
        ENHANCED_CPC (int): Enhanced CPC is a bidding strategy that raises bids for clicks
            that seem more likely to lead to a conversion and lowers
            them for clicks where they seem less likely.
        MANUAL_CPC (int): Manual click based bidding where user pays per click.
        MANUAL_CPM (int): Manual impression based bidding
            where user pays per thousand impressions.
        MANUAL_CPV (int): A bidding strategy that pays a configurable amount per video view.
        MAXIMIZE_CONVERSIONS (int): A bidding strategy that automatically maximizes number of conversions
            given a daily budget.
        MAXIMIZE_CONVERSION_VALUE (int): An automated bidding strategy that automatically sets bids to maximize
            revenue while spending your budget.
        PAGE_ONE_PROMOTED (int): Page-One Promoted bidding scheme, which sets max cpc bids to
            target impressions on page one or page one promoted slots on google.com.
            This enum value is deprecated.
        PERCENT_CPC (int): Percent Cpc is bidding strategy where bids are a fraction of the
            advertised price for some good or service.
        TARGET_CPA (int): Target CPA is an automated bid strategy that sets bids
            to help get as many conversions as possible
            at the target cost-per-acquisition (CPA) you set.
        TARGET_CPM (int): Target CPM is an automated bid strategy that sets bids to help get
            as many impressions as possible at the target cost per one thousand
            impressions (CPM) you set.
        TARGET_IMPRESSION_SHARE (int): An automated bidding strategy that sets bids so that a certain percentage
            of search ads are shown at the top of the first page (or other targeted
            location).
        TARGET_OUTRANK_SHARE (int): Target Outrank Share is an automated bidding strategy that sets bids
            based on the target fraction of auctions where the advertiser
            should outrank a specific competitor.
            This enum value is deprecated.
        TARGET_ROAS (int): Target ROAS is an automated bidding strategy
            that helps you maximize revenue while averaging
            a specific target Return On Average Spend (ROAS).
        TARGET_SPEND (int): Target Spend is an automated bid strategy that sets your bids
            to help get as many clicks as possible within your budget.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    COMMISSION = 16
    ENHANCED_CPC = 2
    MANUAL_CPC = 3
    MANUAL_CPM = 4
    MANUAL_CPV = 13
    MAXIMIZE_CONVERSIONS = 10
    MAXIMIZE_CONVERSION_VALUE = 11
    PAGE_ONE_PROMOTED = 5
    PERCENT_CPC = 12
    TARGET_CPA = 6
    TARGET_CPM = 14
    TARGET_IMPRESSION_SHARE = 15
    TARGET_OUTRANK_SHARE = 7
    TARGET_ROAS = 8
    TARGET_SPEND = 9
'''
BiddingStrategyTypeEnum = BiddingStrategyTypeEnum() # For __getattribute__
# Auto-generated lazy enum holder: the same-named class attribute below is the
# *source code* of the real IntEnum as a string; presumably compiled on first
# attribute access by _CreateEnumTypeUponFirstAccess (base not visible here -- confirm).
class BillingSetupErrorEnum(_CreateEnumTypeUponFirstAccess):
    BillingSetupErrorEnum = '''\
class BillingSetupError(enum.IntEnum):
    """
    Enum describing possible billing setup errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        CANNOT_USE_EXISTING_AND_NEW_ACCOUNT (int): Cannot specify both an existing payments account and a new payments
            account when setting up billing.
        CANNOT_REMOVE_STARTED_BILLING_SETUP (int): Cannot cancel an approved billing setup whose start time has passed.
        CANNOT_CHANGE_BILLING_TO_SAME_PAYMENTS_ACCOUNT (int): Cannot perform a Change of Bill-To (CBT) to the same payments account.
        BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_STATUS (int): Billing setups can only be used by customers with ENABLED or DRAFT
            status.
        INVALID_PAYMENTS_ACCOUNT (int): Billing setups must either include a correctly formatted existing
            payments account id, or a non-empty new payments account name.
        BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_CATEGORY (int): Only billable and third-party customers can create billing setups.
        INVALID_START_TIME_TYPE (int): Billing setup creations can only use NOW for start time type.
        THIRD_PARTY_ALREADY_HAS_BILLING (int): Billing setups can only be created for a third-party customer if they do
            not already have a setup.
        BILLING_SETUP_IN_PROGRESS (int): Billing setups cannot be created if there is already a pending billing in
            progress.
        NO_SIGNUP_PERMISSION (int): Billing setups can only be created by customers who have permission to
            setup billings. Users can contact a representative for help setting up
            permissions.
        CHANGE_OF_BILL_TO_IN_PROGRESS (int): Billing setups cannot be created if there is already a future-approved
            billing.
        PAYMENTS_PROFILE_NOT_FOUND (int): Requested payments profile not found.
        PAYMENTS_ACCOUNT_NOT_FOUND (int): Requested payments account not found.
        PAYMENTS_PROFILE_INELIGIBLE (int): Billing setup creation failed because the payments profile is ineligible.
        PAYMENTS_ACCOUNT_INELIGIBLE (int): Billing setup creation failed because the payments account is ineligible.
        CUSTOMER_NEEDS_INTERNAL_APPROVAL (int): Billing setup creation failed because the payments profile needs internal
            approval.
        PAYMENTS_ACCOUNT_INELIGIBLE_CURRENCY_CODE_MISMATCH (int): Payments account has different currency code than the current customer
            and hence cannot be used to setup billing.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CANNOT_USE_EXISTING_AND_NEW_ACCOUNT = 2
    CANNOT_REMOVE_STARTED_BILLING_SETUP = 3
    CANNOT_CHANGE_BILLING_TO_SAME_PAYMENTS_ACCOUNT = 4
    BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_STATUS = 5
    INVALID_PAYMENTS_ACCOUNT = 6
    BILLING_SETUP_NOT_PERMITTED_FOR_CUSTOMER_CATEGORY = 7
    INVALID_START_TIME_TYPE = 8
    THIRD_PARTY_ALREADY_HAS_BILLING = 9
    BILLING_SETUP_IN_PROGRESS = 10
    NO_SIGNUP_PERMISSION = 11
    CHANGE_OF_BILL_TO_IN_PROGRESS = 12
    PAYMENTS_PROFILE_NOT_FOUND = 13
    PAYMENTS_ACCOUNT_NOT_FOUND = 14
    PAYMENTS_PROFILE_INELIGIBLE = 15
    PAYMENTS_ACCOUNT_INELIGIBLE = 16
    CUSTOMER_NEEDS_INTERNAL_APPROVAL = 17
    PAYMENTS_ACCOUNT_INELIGIBLE_CURRENCY_CODE_MISMATCH = 19
'''
BillingSetupErrorEnum = BillingSetupErrorEnum() # For __getattribute__
class BillingSetupStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the BillingSetupStatus IntEnum; the
    # _CreateEnumTypeUponFirstAccess base presumably compiles this string
    # into the real enum type on first attribute access — confirm in base.
    BillingSetupStatusEnum = '''\
class BillingSetupStatus(enum.IntEnum):
    """
    The possible statuses of a BillingSetup.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PENDING (int): The billing setup is pending approval.
      APPROVED_HELD (int): The billing setup has been approved but the corresponding first budget
        has not. This can only occur for billing setups configured for monthly
        invoicing.
      APPROVED (int): The billing setup has been approved.
      CANCELLED (int): The billing setup was cancelled by the user prior to approval.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PENDING = 2
    APPROVED_HELD = 3
    APPROVED = 4
    CANCELLED = 5
'''
# Replace the class with an instance so lookups go through __getattribute__.
BillingSetupStatusEnum = BillingSetupStatusEnum()  # For __getattribute__
class BrandSafetySuitabilityEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the BrandSafetySuitability IntEnum; the base class
    # presumably materializes the enum lazily on first attribute access.
    BrandSafetySuitabilityEnum = '''\
class BrandSafetySuitability(enum.IntEnum):
    """
    3-Tier brand safety suitability control.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      EXPANDED_INVENTORY (int): This option lets you show ads across all inventory on YouTube and video
        partners that meet our standards for monetization. This option may be an
        appropriate choice for brands that want maximum access to the full
        breadth of videos eligible for ads, including, for example, videos that
        have strong profanity in the context of comedy or a documentary, or
        excessive violence as featured in video games.
      STANDARD_INVENTORY (int): This option lets you show ads across a wide range of content that's
        appropriate for most brands, such as popular music videos, documentaries,
        and movie trailers. The content you can show ads on is based on YouTube's
        advertiser-friendly content guidelines that take into account, for
        example, the strength or frequency of profanity, or the appropriateness
        of subject matter like sensitive events. Ads won't show, for example, on
        content with repeated strong profanity, strong sexual content, or graphic
        violence.
      LIMITED_INVENTORY (int): This option lets you show ads on a reduced range of content that's
        appropriate for brands with particularly strict guidelines around
        inappropriate language and sexual suggestiveness; above and beyond what
        YouTube's advertiser-friendly content guidelines address. The videos
        accessible in this sensitive category meet heightened requirements,
        especially for inappropriate language and sexual suggestiveness. For
        example, your ads will be excluded from showing on some of YouTube's most
        popular music videos and other pop culture content across YouTube and
        Google video partners.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    EXPANDED_INVENTORY = 2
    STANDARD_INVENTORY = 3
    LIMITED_INVENTORY = 4
'''
# Replace the class with an instance so lookups go through __getattribute__.
BrandSafetySuitabilityEnum = BrandSafetySuitabilityEnum()  # For __getattribute__
class BudgetDeliveryMethodEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the BudgetDeliveryMethod IntEnum; presumably compiled
    # lazily by the _CreateEnumTypeUponFirstAccess base on first access.
    BudgetDeliveryMethodEnum = '''\
class BudgetDeliveryMethod(enum.IntEnum):
    """
    Possible delivery methods of a Budget.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      STANDARD (int): The budget server will throttle serving evenly across
        the entire time period.
      ACCELERATED (int): The budget server will not throttle serving,
        and ads will serve as fast as possible.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    STANDARD = 2
    ACCELERATED = 3
'''
# Replace the class with an instance so lookups go through __getattribute__.
BudgetDeliveryMethodEnum = BudgetDeliveryMethodEnum()  # For __getattribute__
class BudgetPeriodEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the BudgetPeriod IntEnum; presumably compiled lazily
    # by the _CreateEnumTypeUponFirstAccess base on first access.
    BudgetPeriodEnum = '''\
class BudgetPeriod(enum.IntEnum):
    """
    Possible period of a Budget.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      DAILY (int): Daily budget.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DAILY = 2
'''
# Replace the class with an instance so lookups go through __getattribute__.
BudgetPeriodEnum = BudgetPeriodEnum()  # For __getattribute__
class BudgetStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the BudgetStatus IntEnum; presumably compiled lazily
    # by the _CreateEnumTypeUponFirstAccess base on first access.
    BudgetStatusEnum = '''\
class BudgetStatus(enum.IntEnum):
    """
    Possible statuses of a Budget.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ENABLED (int): Budget is enabled.
      REMOVED (int): Budget is removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
# Replace the class with an instance so lookups go through __getattribute__.
BudgetStatusEnum = BudgetStatusEnum()  # For __getattribute__
class BudgetTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the BudgetType IntEnum; presumably compiled lazily by
    # the _CreateEnumTypeUponFirstAccess base on first access. The escaped
    # underscores (\\_) are part of the generated docstring text — keep as-is.
    BudgetTypeEnum = '''\
class BudgetType(enum.IntEnum):
    """
    Possible Budget types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      STANDARD (int): Budget type for standard Google Ads usage.
        Caps daily spend at two times the specified budget amount.
        Full details: https://support.google.com/google-ads/answer/6385083
      HOTEL_ADS_COMMISSION (int): Budget type for Hotels Ads commission program. Full details:
        https://support.google.com/google-ads/answer/9243945
        This type is only supported by campaigns with
        AdvertisingChannelType.HOTEL, BiddingStrategyType.COMMISSION and
        PaymentMode.CONVERSION\_VALUE.
      FIXED_CPA (int): Budget type with a fixed cost-per-acquisition (conversion). Full
        details: https://support.google.com/google-ads/answer/7528254
        This type is only supported by campaigns with
        AdvertisingChannelType.DISPLAY (excluding
        AdvertisingChannelSubType.DISPLAY\_GMAIL),
        BiddingStrategyType.TARGET\_CPA and PaymentMode.CONVERSIONS.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    STANDARD = 2
    HOTEL_ADS_COMMISSION = 3
    FIXED_CPA = 4
'''
# Replace the class with an instance so lookups go through __getattribute__.
BudgetTypeEnum = BudgetTypeEnum()  # For __getattribute__
class CallConversionReportingStateEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CallConversionReportingState IntEnum; presumably
    # compiled lazily by the base class on first access.
    CallConversionReportingStateEnum = '''\
class CallConversionReportingState(enum.IntEnum):
    """
    Possible data types for a call conversion action state.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      DISABLED (int): Call conversion action is disabled.
      USE_ACCOUNT_LEVEL_CALL_CONVERSION_ACTION (int): Call conversion action will use call conversion type set at the
        account level.
      USE_RESOURCE_LEVEL_CALL_CONVERSION_ACTION (int): Call conversion action will use call conversion type set at the resource
        (call only ads/call extensions) level.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DISABLED = 2
    USE_ACCOUNT_LEVEL_CALL_CONVERSION_ACTION = 3
    USE_RESOURCE_LEVEL_CALL_CONVERSION_ACTION = 4
'''
# Replace the class with an instance so lookups go through __getattribute__.
CallConversionReportingStateEnum = CallConversionReportingStateEnum()  # For __getattribute__
class CallPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CallPlaceholderField IntEnum; presumably compiled
    # lazily by the base class on first access. Escaped underscores (\\_)
    # are part of the generated docstring text — keep as-is.
    CallPlaceholderFieldEnum = '''\
class CallPlaceholderField(enum.IntEnum):
    """
    Possible values for Call placeholder fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PHONE_NUMBER (int): Data Type: STRING. The advertiser's phone number to append to the ad.
      COUNTRY_CODE (int): Data Type: STRING. Uppercase two-letter country code of the advertiser's
        phone number.
      TRACKED (int): Data Type: BOOLEAN. Indicates whether call tracking is enabled. Default:
        true.
      CONVERSION_TYPE_ID (int): Data Type: INT64. The ID of an AdCallMetricsConversion object. This
        object contains the phoneCallDurationfield which is the minimum duration
        (in seconds) of a call to be considered a conversion.
      CONVERSION_REPORTING_STATE (int): Data Type: STRING. Indicates whether this call extension uses its own
        call conversion setting or follows the account level setting. Valid
        values are: USE\_ACCOUNT\_LEVEL\_CALL\_CONVERSION\_ACTION and
        USE\_RESOURCE\_LEVEL\_CALL\_CONVERSION\_ACTION.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PHONE_NUMBER = 2
    COUNTRY_CODE = 3
    TRACKED = 4
    CONVERSION_TYPE_ID = 5
    CONVERSION_REPORTING_STATE = 6
'''
# Replace the class with an instance so lookups go through __getattribute__.
CallPlaceholderFieldEnum = CallPlaceholderFieldEnum()  # For __getattribute__
class CalloutPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CalloutPlaceholderField IntEnum; presumably
    # compiled lazily by the base class on first access.
    CalloutPlaceholderFieldEnum = '''\
class CalloutPlaceholderField(enum.IntEnum):
    """
    Possible values for Callout placeholder fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      CALLOUT_TEXT (int): Data Type: STRING. Callout text.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CALLOUT_TEXT = 2
'''
# Replace the class with an instance so lookups go through __getattribute__.
CalloutPlaceholderFieldEnum = CalloutPlaceholderFieldEnum()  # For __getattribute__
class CampaignBudgetErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignBudgetError IntEnum; presumably compiled
    # lazily by the base class on first access. Values are not sequential
    # (CAMPAIGN_BUDGET_CANNOT_BE_SHARED = 17) — they mirror the proto field
    # numbers of the upstream API, so do not renumber.
    CampaignBudgetErrorEnum = '''\
class CampaignBudgetError(enum.IntEnum):
    """
    Enum describing possible campaign budget errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      CAMPAIGN_BUDGET_CANNOT_BE_SHARED (int): The campaign budget cannot be shared.
      CAMPAIGN_BUDGET_REMOVED (int): The requested campaign budget no longer exists.
      CAMPAIGN_BUDGET_IN_USE (int): The campaign budget is associated with at least one campaign, and so the
        campaign budget cannot be removed.
      CAMPAIGN_BUDGET_PERIOD_NOT_AVAILABLE (int): Customer is not whitelisted for this campaign budget period.
      CANNOT_MODIFY_FIELD_OF_IMPLICITLY_SHARED_CAMPAIGN_BUDGET (int): This field is not mutable on implicitly shared campaign budgets
      CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_IMPLICITLY_SHARED (int): Cannot change explicitly shared campaign budgets back to implicitly
        shared ones.
      CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_EXPLICITLY_SHARED_WITHOUT_NAME (int): An implicit campaign budget without a name cannot be changed to
        explicitly shared campaign budget.
      CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_EXPLICITLY_SHARED (int): Cannot change an implicitly shared campaign budget to an explicitly
        shared one.
      CANNOT_USE_IMPLICITLY_SHARED_CAMPAIGN_BUDGET_WITH_MULTIPLE_CAMPAIGNS (int): Only explicitly shared campaign budgets can be used with multiple
        campaigns.
      DUPLICATE_NAME (int): A campaign budget with this name already exists.
      MONEY_AMOUNT_IN_WRONG_CURRENCY (int): A money amount was not in the expected currency.
      MONEY_AMOUNT_LESS_THAN_CURRENCY_MINIMUM_CPC (int): A money amount was less than the minimum CPC for currency.
      MONEY_AMOUNT_TOO_LARGE (int): A money amount was greater than the maximum allowed.
      NEGATIVE_MONEY_AMOUNT (int): A money amount was negative.
      NON_MULTIPLE_OF_MINIMUM_CURRENCY_UNIT (int): A money amount was not a multiple of a minimum unit.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CAMPAIGN_BUDGET_CANNOT_BE_SHARED = 17
    CAMPAIGN_BUDGET_REMOVED = 2
    CAMPAIGN_BUDGET_IN_USE = 3
    CAMPAIGN_BUDGET_PERIOD_NOT_AVAILABLE = 4
    CANNOT_MODIFY_FIELD_OF_IMPLICITLY_SHARED_CAMPAIGN_BUDGET = 6
    CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_IMPLICITLY_SHARED = 7
    CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_EXPLICITLY_SHARED_WITHOUT_NAME = 8
    CANNOT_UPDATE_CAMPAIGN_BUDGET_TO_EXPLICITLY_SHARED = 9
    CANNOT_USE_IMPLICITLY_SHARED_CAMPAIGN_BUDGET_WITH_MULTIPLE_CAMPAIGNS = 10
    DUPLICATE_NAME = 11
    MONEY_AMOUNT_IN_WRONG_CURRENCY = 12
    MONEY_AMOUNT_LESS_THAN_CURRENCY_MINIMUM_CPC = 13
    MONEY_AMOUNT_TOO_LARGE = 14
    NEGATIVE_MONEY_AMOUNT = 15
    NON_MULTIPLE_OF_MINIMUM_CURRENCY_UNIT = 16
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignBudgetErrorEnum = CampaignBudgetErrorEnum()  # For __getattribute__
class CampaignCriterionErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignCriterionError IntEnum; presumably
    # compiled lazily by the base class on first access.
    CampaignCriterionErrorEnum = '''\
class CampaignCriterionError(enum.IntEnum):
    """
    Enum describing possible campaign criterion errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      CONCRETE_TYPE_REQUIRED (int): Concrete type of criterion (keyword v.s. placement) is required for
        CREATE and UPDATE operations.
      INVALID_PLACEMENT_URL (int): Invalid placement URL.
      CANNOT_EXCLUDE_CRITERIA_TYPE (int): Criteria type can not be excluded for the campaign by the customer. like
        AOL account type cannot target site type criteria
      CANNOT_SET_STATUS_FOR_CRITERIA_TYPE (int): Cannot set the campaign criterion status for this criteria type.
      CANNOT_SET_STATUS_FOR_EXCLUDED_CRITERIA (int): Cannot set the campaign criterion status for an excluded criteria.
      CANNOT_TARGET_AND_EXCLUDE (int): Cannot target and exclude the same criterion.
      TOO_MANY_OPERATIONS (int): The mutate contained too many operations.
      OPERATOR_NOT_SUPPORTED_FOR_CRITERION_TYPE (int): This operator cannot be applied to a criterion of this type.
      SHOPPING_CAMPAIGN_SALES_COUNTRY_NOT_SUPPORTED_FOR_SALES_CHANNEL (int): The Shopping campaign sales country is not supported for
        ProductSalesChannel targeting.
      CANNOT_ADD_EXISTING_FIELD (int): The existing field can't be updated with CREATE operation. It can be
        updated with UPDATE operation only.
      CANNOT_UPDATE_NEGATIVE_CRITERION (int): Negative criteria are immutable, so updates are not allowed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CONCRETE_TYPE_REQUIRED = 2
    INVALID_PLACEMENT_URL = 3
    CANNOT_EXCLUDE_CRITERIA_TYPE = 4
    CANNOT_SET_STATUS_FOR_CRITERIA_TYPE = 5
    CANNOT_SET_STATUS_FOR_EXCLUDED_CRITERIA = 6
    CANNOT_TARGET_AND_EXCLUDE = 7
    TOO_MANY_OPERATIONS = 8
    OPERATOR_NOT_SUPPORTED_FOR_CRITERION_TYPE = 9
    SHOPPING_CAMPAIGN_SALES_COUNTRY_NOT_SUPPORTED_FOR_SALES_CHANNEL = 10
    CANNOT_ADD_EXISTING_FIELD = 11
    CANNOT_UPDATE_NEGATIVE_CRITERION = 12
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignCriterionErrorEnum = CampaignCriterionErrorEnum()  # For __getattribute__
class CampaignCriterionStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignCriterionStatus IntEnum; presumably
    # compiled lazily by the base class on first access.
    CampaignCriterionStatusEnum = '''\
class CampaignCriterionStatus(enum.IntEnum):
    """
    The possible statuses of a CampaignCriterion.
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): The received value is not known in this version.
        This is a response-only value.
      ENABLED (int): The campaign criterion is enabled.
      PAUSED (int): The campaign criterion is paused.
      REMOVED (int): The campaign criterion is removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    PAUSED = 3
    REMOVED = 4
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignCriterionStatusEnum = CampaignCriterionStatusEnum()  # For __getattribute__
class CampaignDraftErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignDraftError IntEnum; presumably compiled
    # lazily by the base class on first access.
    CampaignDraftErrorEnum = '''\
class CampaignDraftError(enum.IntEnum):
    """
    Enum describing possible campaign draft errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      DUPLICATE_DRAFT_NAME (int): A draft with this name already exists for this campaign.
      INVALID_STATUS_TRANSITION_FROM_REMOVED (int): The draft is removed and cannot be transitioned to another status.
      INVALID_STATUS_TRANSITION_FROM_PROMOTED (int): The draft has been promoted and cannot be transitioned to the specified
        status.
      INVALID_STATUS_TRANSITION_FROM_PROMOTE_FAILED (int): The draft has failed to be promoted and cannot be transitioned to the
        specified status.
      CUSTOMER_CANNOT_CREATE_DRAFT (int): This customer is not allowed to create drafts.
      CAMPAIGN_CANNOT_CREATE_DRAFT (int): This campaign is not allowed to create drafts.
      INVALID_DRAFT_CHANGE (int): This modification cannot be made on a draft.
      INVALID_STATUS_TRANSITION (int): The draft cannot be transitioned to the specified status from its
        current status.
      MAX_NUMBER_OF_DRAFTS_PER_CAMPAIGN_REACHED (int): The campaign has reached the maximum number of drafts that can be created
        for a campaign throughout its lifetime. No additional drafts can be
        created for this campaign. Removed drafts also count towards this limit.
      LIST_ERRORS_FOR_PROMOTED_DRAFT_ONLY (int): ListAsyncErrors was called without first promoting the draft.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DUPLICATE_DRAFT_NAME = 2
    INVALID_STATUS_TRANSITION_FROM_REMOVED = 3
    INVALID_STATUS_TRANSITION_FROM_PROMOTED = 4
    INVALID_STATUS_TRANSITION_FROM_PROMOTE_FAILED = 5
    CUSTOMER_CANNOT_CREATE_DRAFT = 6
    CAMPAIGN_CANNOT_CREATE_DRAFT = 7
    INVALID_DRAFT_CHANGE = 8
    INVALID_STATUS_TRANSITION = 9
    MAX_NUMBER_OF_DRAFTS_PER_CAMPAIGN_REACHED = 10
    LIST_ERRORS_FOR_PROMOTED_DRAFT_ONLY = 11
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignDraftErrorEnum = CampaignDraftErrorEnum()  # For __getattribute__
class CampaignDraftStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignDraftStatus IntEnum; presumably compiled
    # lazily by the base class on first access. Note PROMOTING = 5 precedes
    # PROMOTED = 4 — ordering follows lifecycle, values follow the proto.
    CampaignDraftStatusEnum = '''\
class CampaignDraftStatus(enum.IntEnum):
    """
    Possible statuses of a campaign draft.
    Attributes:
      UNSPECIFIED (int): The status has not been specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PROPOSED (int): Initial state of the draft, the advertiser can start adding changes with
        no effect on serving.
      REMOVED (int): The campaign draft is removed.
      PROMOTING (int): Advertiser requested to promote draft's changes back into the original
        campaign. Advertiser can poll the long running operation returned by
        the promote action to see the status of the promotion.
      PROMOTED (int): The process to merge changes in the draft back to the original campaign
        has completed successfully.
      PROMOTE_FAILED (int): The promotion failed after it was partially applied. Promote cannot be
        attempted again safely, so the issue must be corrected in the original
        campaign.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PROPOSED = 2
    REMOVED = 3
    PROMOTING = 5
    PROMOTED = 4
    PROMOTE_FAILED = 6
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignDraftStatusEnum = CampaignDraftStatusEnum()  # For __getattribute__
class CampaignErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignError IntEnum; presumably compiled lazily
    # by the base class on first access. Values 28-30 are intentionally
    # absent (skipped proto field numbers) — do not renumber.
    CampaignErrorEnum = '''\
class CampaignError(enum.IntEnum):
    """
    Enum describing possible campaign errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      CANNOT_TARGET_CONTENT_NETWORK (int): Cannot target content network.
      CANNOT_TARGET_SEARCH_NETWORK (int): Cannot target search network.
      CANNOT_TARGET_SEARCH_NETWORK_WITHOUT_GOOGLE_SEARCH (int): Cannot cover search network without google search network.
      CANNOT_TARGET_GOOGLE_SEARCH_FOR_CPM_CAMPAIGN (int): Cannot target Google Search network for a CPM campaign.
      CAMPAIGN_MUST_TARGET_AT_LEAST_ONE_NETWORK (int): Must target at least one network.
      CANNOT_TARGET_PARTNER_SEARCH_NETWORK (int): Only some Google partners are allowed to target partner search network.
      CANNOT_TARGET_CONTENT_NETWORK_ONLY_WITH_CRITERIA_LEVEL_BIDDING_STRATEGY (int): Cannot target content network only as campaign has criteria-level bidding
        strategy.
      CAMPAIGN_DURATION_MUST_CONTAIN_ALL_RUNNABLE_TRIALS (int): Cannot modify the start or end date such that the campaign duration would
        not contain the durations of all runnable trials.
      CANNOT_MODIFY_FOR_TRIAL_CAMPAIGN (int): Cannot modify dates, budget or campaign name of a trial campaign.
      DUPLICATE_CAMPAIGN_NAME (int): Trying to modify the name of an active or paused campaign, where the name
        is already assigned to another active or paused campaign.
      INCOMPATIBLE_CAMPAIGN_FIELD (int): Two fields are in conflicting modes.
      INVALID_CAMPAIGN_NAME (int): Campaign name cannot be used.
      INVALID_AD_SERVING_OPTIMIZATION_STATUS (int): Given status is invalid.
      INVALID_TRACKING_URL (int): Error in the campaign level tracking url.
      CANNOT_SET_BOTH_TRACKING_URL_TEMPLATE_AND_TRACKING_SETTING (int): Cannot set both tracking url template and tracking setting. An user has
        to clear legacy tracking setting in order to add tracking url template.
      MAX_IMPRESSIONS_NOT_IN_RANGE (int): The maximum number of impressions for Frequency Cap should be an integer
        greater than 0.
      TIME_UNIT_NOT_SUPPORTED (int): Only the Day, Week and Month time units are supported.
      INVALID_OPERATION_IF_SERVING_STATUS_HAS_ENDED (int): Operation not allowed on a campaign whose serving status has ended
      BUDGET_CANNOT_BE_SHARED (int): This budget is exclusively linked to a Campaign that is using experiments
        so it cannot be shared.
      CAMPAIGN_CANNOT_USE_SHARED_BUDGET (int): Campaigns using experiments cannot use a shared budget.
      CANNOT_CHANGE_BUDGET_ON_CAMPAIGN_WITH_TRIALS (int): A different budget cannot be assigned to a campaign when there are
        running or scheduled trials.
      CAMPAIGN_LABEL_DOES_NOT_EXIST (int): No link found between the campaign and the label.
      CAMPAIGN_LABEL_ALREADY_EXISTS (int): The label has already been attached to the campaign.
      MISSING_SHOPPING_SETTING (int): A ShoppingSetting was not found when creating a shopping campaign.
      INVALID_SHOPPING_SALES_COUNTRY (int): The country in shopping setting is not an allowed country.
      ADVERTISING_CHANNEL_TYPE_NOT_AVAILABLE_FOR_ACCOUNT_TYPE (int): The requested channel type is not available according to the customer's
        account setting.
      INVALID_ADVERTISING_CHANNEL_SUB_TYPE (int): The AdvertisingChannelSubType is not a valid subtype of the primary
        channel type.
      AT_LEAST_ONE_CONVERSION_MUST_BE_SELECTED (int): At least one conversion must be selected.
      CANNOT_SET_AD_ROTATION_MODE (int): Setting ad rotation mode for a campaign is not allowed. Ad rotation mode
        at campaign is deprecated.
      CANNOT_MODIFY_START_DATE_IF_ALREADY_STARTED (int): Trying to change start date on a campaign that has started.
      CANNOT_SET_DATE_TO_PAST (int): Trying to modify a date into the past.
      MISSING_HOTEL_CUSTOMER_LINK (int): Hotel center id in the hotel setting does not match any customer links.
      INVALID_HOTEL_CUSTOMER_LINK (int): Hotel center id in the hotel setting must match an active customer link.
      MISSING_HOTEL_SETTING (int): Hotel setting was not found when creating a hotel ads campaign.
      CANNOT_USE_SHARED_CAMPAIGN_BUDGET_WHILE_PART_OF_CAMPAIGN_GROUP (int): A Campaign cannot use shared campaign budgets and be part of a campaign
        group.
      APP_NOT_FOUND (int): The app ID was not found.
      SHOPPING_ENABLE_LOCAL_NOT_SUPPORTED_FOR_CAMPAIGN_TYPE (int): Campaign.shopping\_setting.enable\_local is not supported for the
        specified campaign type.
      MERCHANT_NOT_ALLOWED_FOR_COMPARISON_LISTING_ADS (int): The merchant does not support the creation of campaigns for Shopping
        Comparison Listing Ads.
      INSUFFICIENT_APP_INSTALLS_COUNT (int): The App campaign for engagement cannot be created because there aren't
        enough installs.
      SENSITIVE_CATEGORY_APP (int): The App campaign for engagement cannot be created because the app is
        sensitive.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CANNOT_TARGET_CONTENT_NETWORK = 3
    CANNOT_TARGET_SEARCH_NETWORK = 4
    CANNOT_TARGET_SEARCH_NETWORK_WITHOUT_GOOGLE_SEARCH = 5
    CANNOT_TARGET_GOOGLE_SEARCH_FOR_CPM_CAMPAIGN = 6
    CAMPAIGN_MUST_TARGET_AT_LEAST_ONE_NETWORK = 7
    CANNOT_TARGET_PARTNER_SEARCH_NETWORK = 8
    CANNOT_TARGET_CONTENT_NETWORK_ONLY_WITH_CRITERIA_LEVEL_BIDDING_STRATEGY = 9
    CAMPAIGN_DURATION_MUST_CONTAIN_ALL_RUNNABLE_TRIALS = 10
    CANNOT_MODIFY_FOR_TRIAL_CAMPAIGN = 11
    DUPLICATE_CAMPAIGN_NAME = 12
    INCOMPATIBLE_CAMPAIGN_FIELD = 13
    INVALID_CAMPAIGN_NAME = 14
    INVALID_AD_SERVING_OPTIMIZATION_STATUS = 15
    INVALID_TRACKING_URL = 16
    CANNOT_SET_BOTH_TRACKING_URL_TEMPLATE_AND_TRACKING_SETTING = 17
    MAX_IMPRESSIONS_NOT_IN_RANGE = 18
    TIME_UNIT_NOT_SUPPORTED = 19
    INVALID_OPERATION_IF_SERVING_STATUS_HAS_ENDED = 20
    BUDGET_CANNOT_BE_SHARED = 21
    CAMPAIGN_CANNOT_USE_SHARED_BUDGET = 22
    CANNOT_CHANGE_BUDGET_ON_CAMPAIGN_WITH_TRIALS = 23
    CAMPAIGN_LABEL_DOES_NOT_EXIST = 24
    CAMPAIGN_LABEL_ALREADY_EXISTS = 25
    MISSING_SHOPPING_SETTING = 26
    INVALID_SHOPPING_SALES_COUNTRY = 27
    ADVERTISING_CHANNEL_TYPE_NOT_AVAILABLE_FOR_ACCOUNT_TYPE = 31
    INVALID_ADVERTISING_CHANNEL_SUB_TYPE = 32
    AT_LEAST_ONE_CONVERSION_MUST_BE_SELECTED = 33
    CANNOT_SET_AD_ROTATION_MODE = 34
    CANNOT_MODIFY_START_DATE_IF_ALREADY_STARTED = 35
    CANNOT_SET_DATE_TO_PAST = 36
    MISSING_HOTEL_CUSTOMER_LINK = 37
    INVALID_HOTEL_CUSTOMER_LINK = 38
    MISSING_HOTEL_SETTING = 39
    CANNOT_USE_SHARED_CAMPAIGN_BUDGET_WHILE_PART_OF_CAMPAIGN_GROUP = 40
    APP_NOT_FOUND = 41
    SHOPPING_ENABLE_LOCAL_NOT_SUPPORTED_FOR_CAMPAIGN_TYPE = 42
    MERCHANT_NOT_ALLOWED_FOR_COMPARISON_LISTING_ADS = 43
    INSUFFICIENT_APP_INSTALLS_COUNT = 44
    SENSITIVE_CATEGORY_APP = 45
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignErrorEnum = CampaignErrorEnum()  # For __getattribute__
class CampaignExperimentErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignExperimentError IntEnum; presumably
    # compiled lazily by the base class on first access.
    CampaignExperimentErrorEnum = '''\
class CampaignExperimentError(enum.IntEnum):
    """
    Enum describing possible campaign experiment errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      DUPLICATE_NAME (int): An active campaign or experiment with this name already exists.
      INVALID_TRANSITION (int): Experiment cannot be updated from the current state to the
        requested target state. For example, an experiment can only graduate
        if its status is ENABLED.
      CANNOT_CREATE_EXPERIMENT_WITH_SHARED_BUDGET (int): Cannot create an experiment from a campaign using an explicitly shared
        budget.
      CANNOT_CREATE_EXPERIMENT_FOR_REMOVED_BASE_CAMPAIGN (int): Cannot create an experiment for a removed base campaign.
      CANNOT_CREATE_EXPERIMENT_FOR_NON_PROPOSED_DRAFT (int): Cannot create an experiment from a draft, which has a status other than
        proposed.
      CUSTOMER_CANNOT_CREATE_EXPERIMENT (int): This customer is not allowed to create an experiment.
      CAMPAIGN_CANNOT_CREATE_EXPERIMENT (int): This campaign is not allowed to create an experiment.
      EXPERIMENT_DURATIONS_MUST_NOT_OVERLAP (int): Trying to set an experiment duration which overlaps with another
        experiment.
      EXPERIMENT_DURATION_MUST_BE_WITHIN_CAMPAIGN_DURATION (int): All non-removed experiments must start and end within their campaign's
        duration.
      CANNOT_MUTATE_EXPERIMENT_DUE_TO_STATUS (int): The experiment cannot be modified because its status is in a terminal
        state, such as REMOVED.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DUPLICATE_NAME = 2
    INVALID_TRANSITION = 3
    CANNOT_CREATE_EXPERIMENT_WITH_SHARED_BUDGET = 4
    CANNOT_CREATE_EXPERIMENT_FOR_REMOVED_BASE_CAMPAIGN = 5
    CANNOT_CREATE_EXPERIMENT_FOR_NON_PROPOSED_DRAFT = 6
    CUSTOMER_CANNOT_CREATE_EXPERIMENT = 7
    CAMPAIGN_CANNOT_CREATE_EXPERIMENT = 8
    EXPERIMENT_DURATIONS_MUST_NOT_OVERLAP = 9
    EXPERIMENT_DURATION_MUST_BE_WITHIN_CAMPAIGN_DURATION = 10
    CANNOT_MUTATE_EXPERIMENT_DUE_TO_STATUS = 11
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignExperimentErrorEnum = CampaignExperimentErrorEnum()  # For __getattribute__
class CampaignExperimentStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignExperimentStatus IntEnum; presumably
    # compiled lazily by the base class on first access. Member order follows
    # lifecycle, not numeric value (e.g. INITIALIZATION_FAILED = 8).
    CampaignExperimentStatusEnum = '''\
class CampaignExperimentStatus(enum.IntEnum):
    """
    Possible statuses of a campaign experiment.
    Attributes:
      UNSPECIFIED (int): The status has not been specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      INITIALIZING (int): The experiment campaign is being initialized.
      INITIALIZATION_FAILED (int): Initialization of the experiment campaign failed.
      ENABLED (int): The experiment campaign is fully initialized. The experiment is currently
        running, scheduled to run in the future or has ended based on its
        end date. An experiment with the status INITIALIZING will be updated to
        ENABLED when it is fully created.
      GRADUATED (int): The experiment campaign was graduated to a stand-alone
        campaign, existing independently of the experiment.
      REMOVED (int): The experiment is removed.
      PROMOTING (int): The experiment's changes are being applied to the original campaign.
        The long running operation returned by the promote method can be polled
        to see the status of the promotion.
      PROMOTION_FAILED (int): Promote of the experiment campaign failed.
      PROMOTED (int): The changes of the experiment are promoted to their original campaign.
      ENDED_MANUALLY (int): The experiment was ended manually. It did not end based on its end date.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INITIALIZING = 2
    INITIALIZATION_FAILED = 8
    ENABLED = 3
    GRADUATED = 4
    REMOVED = 5
    PROMOTING = 6
    PROMOTION_FAILED = 9
    PROMOTED = 7
    ENDED_MANUALLY = 10
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignExperimentStatusEnum = CampaignExperimentStatusEnum()  # For __getattribute__
class CampaignExperimentTrafficSplitTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignExperimentTrafficSplitType IntEnum;
    # presumably compiled lazily by the base class on first access.
    CampaignExperimentTrafficSplitTypeEnum = '''\
class CampaignExperimentTrafficSplitType(enum.IntEnum):
    """
    Enum of strategies for splitting traffic between base and experiment
    campaigns in campaign experiment.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      RANDOM_QUERY (int): Traffic is randomly assigned to the base or experiment arm for each
        query, independent of previous assignments for the same user.
      COOKIE (int): Traffic is split using cookies to keep users in the same arm (base or
        experiment) of the experiment.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    RANDOM_QUERY = 2
    COOKIE = 3
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignExperimentTrafficSplitTypeEnum = CampaignExperimentTrafficSplitTypeEnum()  # For __getattribute__
class CampaignExperimentTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignExperimentType IntEnum; presumably
    # compiled lazily by the base class on first access.
    CampaignExperimentTypeEnum = '''\
class CampaignExperimentType(enum.IntEnum):
    """
    Indicates if this campaign is a normal campaign,
    a draft campaign, or an experiment campaign.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      BASE (int): This is a regular campaign.
      DRAFT (int): This is a draft version of a campaign.
        It has some modifications from a base campaign,
        but it does not serve or accrue metrics.
      EXPERIMENT (int): This is an experiment version of a campaign.
        It has some modifications from a base campaign,
        and a percentage of traffic is being diverted
        from the BASE campaign to this experiment campaign.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BASE = 2
    DRAFT = 3
    EXPERIMENT = 4
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignExperimentTypeEnum = CampaignExperimentTypeEnum()  # For __getattribute__
class CampaignFeedErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignFeedError IntEnum; presumably compiled
    # lazily by the base class on first access. Value 3 is intentionally
    # absent (skipped proto field number).
    CampaignFeedErrorEnum = '''\
class CampaignFeedError(enum.IntEnum):
    """
    Enum describing possible campaign feed errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE (int): An active feed already exists for this campaign and placeholder type.
      CANNOT_CREATE_FOR_REMOVED_FEED (int): The specified feed is removed.
      CANNOT_CREATE_ALREADY_EXISTING_CAMPAIGN_FEED (int): The CampaignFeed already exists. UPDATE should be used to modify the
        existing CampaignFeed.
      CANNOT_MODIFY_REMOVED_CAMPAIGN_FEED (int): Cannot update removed campaign feed.
      INVALID_PLACEHOLDER_TYPE (int): Invalid placeholder type.
      MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE (int): Feed mapping for this placeholder type does not exist.
      NO_EXISTING_LOCATION_CUSTOMER_FEED (int): Location CampaignFeeds cannot be created unless there is a location
        CustomerFeed for the specified feed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2
    CANNOT_CREATE_FOR_REMOVED_FEED = 4
    CANNOT_CREATE_ALREADY_EXISTING_CAMPAIGN_FEED = 5
    CANNOT_MODIFY_REMOVED_CAMPAIGN_FEED = 6
    INVALID_PLACEHOLDER_TYPE = 7
    MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 8
    NO_EXISTING_LOCATION_CUSTOMER_FEED = 9
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignFeedErrorEnum = CampaignFeedErrorEnum()  # For __getattribute__
class CampaignServingStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignServingStatus IntEnum; presumably
    # compiled lazily by the base class on first access.
    CampaignServingStatusEnum = '''\
class CampaignServingStatus(enum.IntEnum):
    """
    Possible serving statuses of a campaign.
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): The received value is not known in this version.
        This is a response-only value.
      SERVING (int): Serving.
      NONE (int): None.
      ENDED (int): Ended.
      PENDING (int): Pending.
      SUSPENDED (int): Suspended.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    SERVING = 2
    NONE = 3
    ENDED = 4
    PENDING = 5
    SUSPENDED = 6
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignServingStatusEnum = CampaignServingStatusEnum()  # For __getattribute__
class CampaignSharedSetErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignSharedSetError IntEnum; presumably
    # compiled lazily by the base class on first access.
    CampaignSharedSetErrorEnum = '''\
class CampaignSharedSetError(enum.IntEnum):
    """
    Enum describing possible campaign shared set errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      SHARED_SET_ACCESS_DENIED (int): The shared set belongs to another customer and permission isn't granted.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    SHARED_SET_ACCESS_DENIED = 2
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignSharedSetErrorEnum = CampaignSharedSetErrorEnum()  # For __getattribute__
class CampaignSharedSetStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source text for the CampaignSharedSetStatus IntEnum; presumably
    # compiled lazily by the base class on first access.
    CampaignSharedSetStatusEnum = '''\
class CampaignSharedSetStatus(enum.IntEnum):
    """
    Enum listing the possible campaign shared set statuses.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ENABLED (int): The campaign shared set is enabled.
      REMOVED (int): The campaign shared set is removed and can no longer be used.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
# Replace the class with an instance so lookups go through __getattribute__.
CampaignSharedSetStatusEnum = CampaignSharedSetStatusEnum()  # For __getattribute__
# Lazy holder for the CampaignStatus IntEnum source code; the base class
# (defined elsewhere) presumably builds the enum on first access.
class CampaignStatusEnum(_CreateEnumTypeUponFirstAccess):
    CampaignStatusEnum = '''\
class CampaignStatus(enum.IntEnum):
    """
    Possible statuses of a campaign.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ENABLED (int): Campaign is currently serving ads depending on budget information.
      PAUSED (int): Campaign has been paused by the user.
      REMOVED (int): Campaign has been removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    PAUSED = 3
    REMOVED = 4
'''
CampaignStatusEnum = CampaignStatusEnum() # For __getattribute__
# Lazy holder for the ChangeStatusError IntEnum source code; the base class
# (defined elsewhere) presumably builds the enum on first access.
# NOTE(review): member values intentionally skip 2 (mirrors the API proto).
class ChangeStatusErrorEnum(_CreateEnumTypeUponFirstAccess):
    ChangeStatusErrorEnum = '''\
class ChangeStatusError(enum.IntEnum):
    """
    Enum describing possible change status errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      START_DATE_TOO_OLD (int): The requested start date is too old.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    START_DATE_TOO_OLD = 3
'''
ChangeStatusErrorEnum = ChangeStatusErrorEnum() # For __getattribute__
# Lazy holder for the ChangeStatusOperation IntEnum source code; the base
# class (defined elsewhere) presumably builds the enum on first access.
class ChangeStatusOperationEnum(_CreateEnumTypeUponFirstAccess):
    ChangeStatusOperationEnum = '''\
class ChangeStatusOperation(enum.IntEnum):
    """
    Status of the changed resource
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): Used for return value only. Represents an unclassified resource unknown
      in this version.
      ADDED (int): The resource was created.
      CHANGED (int): The resource was modified.
      REMOVED (int): The resource was removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ADDED = 2
    CHANGED = 3
    REMOVED = 4
'''
ChangeStatusOperationEnum = ChangeStatusOperationEnum() # For __getattribute__
# Lazy holder for the ChangeStatusResourceType IntEnum source code; the base
# class (defined elsewhere) presumably builds the enum on first access.
# NOTE(review): values skip 2 and 8 (mirrors the API proto numbering).
class ChangeStatusResourceTypeEnum(_CreateEnumTypeUponFirstAccess):
    ChangeStatusResourceTypeEnum = '''\
class ChangeStatusResourceType(enum.IntEnum):
    """
    Enum listing the resource types support by the ChangeStatus resource.
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): Used for return value only. Represents an unclassified resource unknown
      in this version.
      AD_GROUP (int): An AdGroup resource change.
      AD_GROUP_AD (int): An AdGroupAd resource change.
      AD_GROUP_CRITERION (int): An AdGroupCriterion resource change.
      CAMPAIGN (int): A Campaign resource change.
      CAMPAIGN_CRITERION (int): A CampaignCriterion resource change.
      FEED (int): A Feed resource change.
      FEED_ITEM (int): A FeedItem resource change.
      AD_GROUP_FEED (int): An AdGroupFeed resource change.
      CAMPAIGN_FEED (int): A CampaignFeed resource change.
      AD_GROUP_BID_MODIFIER (int): An AdGroupBidModifier resource change.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    AD_GROUP = 3
    AD_GROUP_AD = 4
    AD_GROUP_CRITERION = 5
    CAMPAIGN = 6
    CAMPAIGN_CRITERION = 7
    FEED = 9
    FEED_ITEM = 10
    AD_GROUP_FEED = 11
    CAMPAIGN_FEED = 12
    AD_GROUP_BID_MODIFIER = 13
'''
ChangeStatusResourceTypeEnum = ChangeStatusResourceTypeEnum() # For __getattribute__
# Lazy holder for the ClickType IntEnum source code; the base class (defined
# elsewhere) presumably builds the enum on first access.
# NOTE(review): values skip 24 and 41 (mirrors the API proto numbering).
class ClickTypeEnum(_CreateEnumTypeUponFirstAccess):
    ClickTypeEnum = '''\
class ClickType(enum.IntEnum):
    """
    Enumerates Google Ads click types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      APP_DEEPLINK (int): App engagement ad deep link.
      BREADCRUMBS (int): Breadcrumbs.
      BROADBAND_PLAN (int): Broadband Plan.
      CALL_TRACKING (int): Manually dialed phone calls.
      CALLS (int): Phone calls.
      CLICK_ON_ENGAGEMENT_AD (int): Click on engagement ad.
      GET_DIRECTIONS (int): Driving direction.
      LOCATION_EXPANSION (int): Get location details.
      LOCATION_FORMAT_CALL (int): Call.
      LOCATION_FORMAT_DIRECTIONS (int): Directions.
      LOCATION_FORMAT_IMAGE (int): Image(s).
      LOCATION_FORMAT_LANDING_PAGE (int): Go to landing page.
      LOCATION_FORMAT_MAP (int): Map.
      LOCATION_FORMAT_STORE_INFO (int): Go to store info.
      LOCATION_FORMAT_TEXT (int): Text.
      MOBILE_CALL_TRACKING (int): Mobile phone calls.
      OFFER_PRINTS (int): Print offer.
      OTHER (int): Other.
      PRODUCT_EXTENSION_CLICKS (int): Product plusbox offer.
      PRODUCT_LISTING_AD_CLICKS (int): Shopping - Product - Online.
      SITELINKS (int): Sitelink.
      STORE_LOCATOR (int): Show nearby locations.
      URL_CLICKS (int): Headline.
      VIDEO_APP_STORE_CLICKS (int): App store.
      VIDEO_CALL_TO_ACTION_CLICKS (int): Call-to-Action overlay.
      VIDEO_CARD_ACTION_HEADLINE_CLICKS (int): Cards.
      VIDEO_END_CAP_CLICKS (int): End cap.
      VIDEO_WEBSITE_CLICKS (int): Website.
      VISUAL_SITELINKS (int): Visual Sitelinks.
      WIRELESS_PLAN (int): Wireless Plan.
      PRODUCT_LISTING_AD_LOCAL (int): Shopping - Product - Local.
      PRODUCT_LISTING_AD_MULTICHANNEL_LOCAL (int): Shopping - Product - MultiChannel Local.
      PRODUCT_LISTING_AD_MULTICHANNEL_ONLINE (int): Shopping - Product - MultiChannel Online.
      PRODUCT_LISTING_ADS_COUPON (int): Shopping - Product - Coupon.
      PRODUCT_LISTING_AD_TRANSACTABLE (int): Shopping - Product - Sell on Google.
      PRODUCT_AD_APP_DEEPLINK (int): Shopping - Product - App engagement ad deep link.
      SHOWCASE_AD_CATEGORY_LINK (int): Shopping - Showcase - Category.
      SHOWCASE_AD_LOCAL_STOREFRONT_LINK (int): Shopping - Showcase - Local storefront.
      SHOWCASE_AD_ONLINE_PRODUCT_LINK (int): Shopping - Showcase - Online product.
      SHOWCASE_AD_LOCAL_PRODUCT_LINK (int): Shopping - Showcase - Local product.
      PROMOTION_EXTENSION (int): Promotion Extension.
      SWIPEABLE_GALLERY_AD_HEADLINE (int): Ad Headline.
      SWIPEABLE_GALLERY_AD_SWIPES (int): Swipes.
      SWIPEABLE_GALLERY_AD_SEE_MORE (int): See More.
      SWIPEABLE_GALLERY_AD_SITELINK_ONE (int): Sitelink 1.
      SWIPEABLE_GALLERY_AD_SITELINK_TWO (int): Sitelink 2.
      SWIPEABLE_GALLERY_AD_SITELINK_THREE (int): Sitelink 3.
      SWIPEABLE_GALLERY_AD_SITELINK_FOUR (int): Sitelink 4.
      SWIPEABLE_GALLERY_AD_SITELINK_FIVE (int): Sitelink 5.
      HOTEL_PRICE (int): Hotel price.
      PRICE_EXTENSION (int): Price Extension.
      HOTEL_BOOK_ON_GOOGLE_ROOM_SELECTION (int): Book on Google hotel room selection.
      SHOPPING_COMPARISON_LISTING (int): Shopping - Comparison Listing.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    APP_DEEPLINK = 2
    BREADCRUMBS = 3
    BROADBAND_PLAN = 4
    CALL_TRACKING = 5
    CALLS = 6
    CLICK_ON_ENGAGEMENT_AD = 7
    GET_DIRECTIONS = 8
    LOCATION_EXPANSION = 9
    LOCATION_FORMAT_CALL = 10
    LOCATION_FORMAT_DIRECTIONS = 11
    LOCATION_FORMAT_IMAGE = 12
    LOCATION_FORMAT_LANDING_PAGE = 13
    LOCATION_FORMAT_MAP = 14
    LOCATION_FORMAT_STORE_INFO = 15
    LOCATION_FORMAT_TEXT = 16
    MOBILE_CALL_TRACKING = 17
    OFFER_PRINTS = 18
    OTHER = 19
    PRODUCT_EXTENSION_CLICKS = 20
    PRODUCT_LISTING_AD_CLICKS = 21
    SITELINKS = 22
    STORE_LOCATOR = 23
    URL_CLICKS = 25
    VIDEO_APP_STORE_CLICKS = 26
    VIDEO_CALL_TO_ACTION_CLICKS = 27
    VIDEO_CARD_ACTION_HEADLINE_CLICKS = 28
    VIDEO_END_CAP_CLICKS = 29
    VIDEO_WEBSITE_CLICKS = 30
    VISUAL_SITELINKS = 31
    WIRELESS_PLAN = 32
    PRODUCT_LISTING_AD_LOCAL = 33
    PRODUCT_LISTING_AD_MULTICHANNEL_LOCAL = 34
    PRODUCT_LISTING_AD_MULTICHANNEL_ONLINE = 35
    PRODUCT_LISTING_ADS_COUPON = 36
    PRODUCT_LISTING_AD_TRANSACTABLE = 37
    PRODUCT_AD_APP_DEEPLINK = 38
    SHOWCASE_AD_CATEGORY_LINK = 39
    SHOWCASE_AD_LOCAL_STOREFRONT_LINK = 40
    SHOWCASE_AD_ONLINE_PRODUCT_LINK = 42
    SHOWCASE_AD_LOCAL_PRODUCT_LINK = 43
    PROMOTION_EXTENSION = 44
    SWIPEABLE_GALLERY_AD_HEADLINE = 45
    SWIPEABLE_GALLERY_AD_SWIPES = 46
    SWIPEABLE_GALLERY_AD_SEE_MORE = 47
    SWIPEABLE_GALLERY_AD_SITELINK_ONE = 48
    SWIPEABLE_GALLERY_AD_SITELINK_TWO = 49
    SWIPEABLE_GALLERY_AD_SITELINK_THREE = 50
    SWIPEABLE_GALLERY_AD_SITELINK_FOUR = 51
    SWIPEABLE_GALLERY_AD_SITELINK_FIVE = 52
    HOTEL_PRICE = 53
    PRICE_EXTENSION = 54
    HOTEL_BOOK_ON_GOOGLE_ROOM_SELECTION = 55
    SHOPPING_COMPARISON_LISTING = 56
'''
ClickTypeEnum = ClickTypeEnum() # For __getattribute__
# Lazy holder for the CollectionSizeError IntEnum source code; the base class
# (defined elsewhere) presumably builds the enum on first access.
class CollectionSizeErrorEnum(_CreateEnumTypeUponFirstAccess):
    CollectionSizeErrorEnum = '''\
class CollectionSizeError(enum.IntEnum):
    """
    Enum describing possible collection size errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      TOO_FEW (int): Too few.
      TOO_MANY (int): Too many.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    TOO_FEW = 2
    TOO_MANY = 3
'''
CollectionSizeErrorEnum = CollectionSizeErrorEnum() # For __getattribute__
# Lazy holder for the ContentLabelType IntEnum source code; the base class
# (defined elsewhere) presumably builds the enum on first access.
# NOTE(review): values skip 5 (mirrors the API proto numbering).
class ContentLabelTypeEnum(_CreateEnumTypeUponFirstAccess):
    ContentLabelTypeEnum = '''\
class ContentLabelType(enum.IntEnum):
    """
    Enum listing the content label types supported by ContentLabel criterion.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      SEXUALLY_SUGGESTIVE (int): Sexually suggestive content.
      BELOW_THE_FOLD (int): Below the fold placement.
      PARKED_DOMAIN (int): Parked domain.
      JUVENILE (int): Juvenile, gross & bizarre content.
      PROFANITY (int): Profanity & rough language.
      TRAGEDY (int): Death & tragedy.
      VIDEO (int): Video.
      VIDEO_RATING_DV_G (int): Content rating: G.
      VIDEO_RATING_DV_PG (int): Content rating: PG.
      VIDEO_RATING_DV_T (int): Content rating: T.
      VIDEO_RATING_DV_MA (int): Content rating: MA.
      VIDEO_NOT_YET_RATED (int): Content rating: not yet rated.
      EMBEDDED_VIDEO (int): Embedded video.
      LIVE_STREAMING_VIDEO (int): Live streaming video.
      SOCIAL_ISSUES (int): Sensitive social issues.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    SEXUALLY_SUGGESTIVE = 2
    BELOW_THE_FOLD = 3
    PARKED_DOMAIN = 4
    JUVENILE = 6
    PROFANITY = 7
    TRAGEDY = 8
    VIDEO = 9
    VIDEO_RATING_DV_G = 10
    VIDEO_RATING_DV_PG = 11
    VIDEO_RATING_DV_T = 12
    VIDEO_RATING_DV_MA = 13
    VIDEO_NOT_YET_RATED = 14
    EMBEDDED_VIDEO = 15
    LIVE_STREAMING_VIDEO = 16
    SOCIAL_ISSUES = 17
'''
ContentLabelTypeEnum = ContentLabelTypeEnum() # For __getattribute__
# Lazy holder for the ContextError IntEnum source code; the base class
# (defined elsewhere) presumably builds the enum on first access.
class ContextErrorEnum(_CreateEnumTypeUponFirstAccess):
    ContextErrorEnum = '''\
class ContextError(enum.IntEnum):
    """
    Enum describing possible context errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      OPERATION_NOT_PERMITTED_FOR_CONTEXT (int): The operation is not allowed for the given context.
      OPERATION_NOT_PERMITTED_FOR_REMOVED_RESOURCE (int): The operation is not allowed for removed resources.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    OPERATION_NOT_PERMITTED_FOR_CONTEXT = 2
    OPERATION_NOT_PERMITTED_FOR_REMOVED_RESOURCE = 3
'''
ContextErrorEnum = ContextErrorEnum() # For __getattribute__
# Lazy holder for the ConversionActionCategory IntEnum source code; the base
# class (defined elsewhere) presumably builds the enum on first access.
class ConversionActionCategoryEnum(_CreateEnumTypeUponFirstAccess):
    ConversionActionCategoryEnum = '''\
class ConversionActionCategory(enum.IntEnum):
    """
    The category of conversions that are associated with a ConversionAction.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      DEFAULT (int): Default category.
      PAGE_VIEW (int): User visiting a page.
      PURCHASE (int): Purchase, sales, or "order placed" event.
      SIGNUP (int): Signup user action.
      LEAD (int): Lead-generating action.
      DOWNLOAD (int): Software download action (as for an app).
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DEFAULT = 2
    PAGE_VIEW = 3
    PURCHASE = 4
    SIGNUP = 5
    LEAD = 6
    DOWNLOAD = 7
'''
ConversionActionCategoryEnum = ConversionActionCategoryEnum() # For __getattribute__
# Lazy holder for the ConversionActionCountingType IntEnum source code; the
# base class (defined elsewhere) presumably builds the enum on first access.
class ConversionActionCountingTypeEnum(_CreateEnumTypeUponFirstAccess):
    ConversionActionCountingTypeEnum = '''\
class ConversionActionCountingType(enum.IntEnum):
    """
    Indicates how conversions for this action will be counted. For more
    information, see https://support.google.com/google-ads/answer/3438531.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ONE_PER_CLICK (int): Count only one conversion per click.
      MANY_PER_CLICK (int): Count all conversions per click.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ONE_PER_CLICK = 2
    MANY_PER_CLICK = 3
'''
ConversionActionCountingTypeEnum = ConversionActionCountingTypeEnum() # For __getattribute__
# Lazy holder for the ConversionActionError IntEnum source code; the base
# class (defined elsewhere) presumably builds the enum on first access.
class ConversionActionErrorEnum(_CreateEnumTypeUponFirstAccess):
    ConversionActionErrorEnum = '''\
class ConversionActionError(enum.IntEnum):
    """
    Enum describing possible conversion action errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      DUPLICATE_NAME (int): The specified conversion action name already exists.
      DUPLICATE_APP_ID (int): Another conversion action with the specified app id already exists.
      TWO_CONVERSION_ACTIONS_BIDDING_ON_SAME_APP_DOWNLOAD (int): Android first open action conflicts with Google play codeless download
      action tracking the same app.
      BIDDING_ON_SAME_APP_DOWNLOAD_AS_GLOBAL_ACTION (int): Android first open action conflicts with Google play codeless download
      action tracking the same app.
      DATA_DRIVEN_MODEL_WAS_NEVER_GENERATED (int): The attribution model cannot be set to DATA\_DRIVEN because a
      data-driven model has never been generated.
      DATA_DRIVEN_MODEL_EXPIRED (int): The attribution model cannot be set to DATA\_DRIVEN because the
      data-driven model is expired.
      DATA_DRIVEN_MODEL_STALE (int): The attribution model cannot be set to DATA\_DRIVEN because the
      data-driven model is stale.
      DATA_DRIVEN_MODEL_UNKNOWN (int): The attribution model cannot be set to DATA\_DRIVEN because the
      data-driven model is unavailable or the conversion action was newly
      added.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DUPLICATE_NAME = 2
    DUPLICATE_APP_ID = 3
    TWO_CONVERSION_ACTIONS_BIDDING_ON_SAME_APP_DOWNLOAD = 4
    BIDDING_ON_SAME_APP_DOWNLOAD_AS_GLOBAL_ACTION = 5
    DATA_DRIVEN_MODEL_WAS_NEVER_GENERATED = 6
    DATA_DRIVEN_MODEL_EXPIRED = 7
    DATA_DRIVEN_MODEL_STALE = 8
    DATA_DRIVEN_MODEL_UNKNOWN = 9
'''
ConversionActionErrorEnum = ConversionActionErrorEnum() # For __getattribute__
# Lazy holder for the ConversionActionStatus IntEnum source code; the base
# class (defined elsewhere) presumably builds the enum on first access.
class ConversionActionStatusEnum(_CreateEnumTypeUponFirstAccess):
    ConversionActionStatusEnum = '''\
class ConversionActionStatus(enum.IntEnum):
    """
    Possible statuses of a conversion action.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ENABLED (int): Conversions will be recorded.
      REMOVED (int): Conversions will not be recorded.
      HIDDEN (int): Conversions will not be recorded and the conversion action will not
      appear in the UI.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
    HIDDEN = 4
'''
ConversionActionStatusEnum = ConversionActionStatusEnum() # For __getattribute__
# Lazy holder for the ConversionActionType IntEnum source code; the base
# class (defined elsewhere) presumably builds the enum on first access.
class ConversionActionTypeEnum(_CreateEnumTypeUponFirstAccess):
    ConversionActionTypeEnum = '''\
class ConversionActionType(enum.IntEnum):
    """
    Possible types of a conversion action.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      AD_CALL (int): Conversions that occur when a user clicks on an ad's call extension.
      CLICK_TO_CALL (int): Conversions that occur when a user on a mobile device clicks a phone
      number.
      GOOGLE_PLAY_DOWNLOAD (int): Conversions that occur when a user downloads a mobile app from the Google
      Play Store.
      GOOGLE_PLAY_IN_APP_PURCHASE (int): Conversions that occur when a user makes a purchase in an app through
      Android billing.
      UPLOAD_CALLS (int): Call conversions that are tracked by the advertiser and uploaded.
      UPLOAD_CLICKS (int): Conversions that are tracked by the advertiser and uploaded with
      attributed clicks.
      WEBPAGE (int): Conversions that occur on a webpage.
      WEBSITE_CALL (int): Conversions that occur when a user calls a dynamically-generated phone
      number from an advertiser's website.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    AD_CALL = 2
    CLICK_TO_CALL = 3
    GOOGLE_PLAY_DOWNLOAD = 4
    GOOGLE_PLAY_IN_APP_PURCHASE = 5
    UPLOAD_CALLS = 6
    UPLOAD_CLICKS = 7
    WEBPAGE = 8
    WEBSITE_CALL = 9
'''
ConversionActionTypeEnum = ConversionActionTypeEnum() # For __getattribute__
# Lazy holder for the ConversionAdjustmentType IntEnum source code; the base
# class (defined elsewhere) presumably builds the enum on first access.
class ConversionAdjustmentTypeEnum(_CreateEnumTypeUponFirstAccess):
    ConversionAdjustmentTypeEnum = '''\
class ConversionAdjustmentType(enum.IntEnum):
    """
    The different actions advertisers can take to adjust the conversions that
    they already reported. Retractions negate a conversion. Restatements change
    the value of a conversion.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Represents value unknown in this version.
      RETRACTION (int): Negates a conversion so that its total value and count are both zero.
      RESTATEMENT (int): Changes the value of a conversion.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    RETRACTION = 2
    RESTATEMENT = 3
'''
ConversionAdjustmentTypeEnum = ConversionAdjustmentTypeEnum() # For __getattribute__
# Lazy holder for the ConversionAdjustmentUploadError IntEnum source code;
# the base class (defined elsewhere) presumably builds the enum on first
# access.
class ConversionAdjustmentUploadErrorEnum(_CreateEnumTypeUponFirstAccess):
    ConversionAdjustmentUploadErrorEnum = '''\
class ConversionAdjustmentUploadError(enum.IntEnum):
    """
    Enum describing possible conversion adjustment upload errors.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The received error code is not known in this version.
      TOO_RECENT_CONVERSION_ACTION (int): The specified conversion action was created too recently.
      Please try the upload again after 4-6 hours have passed since the
      conversion action was created.
      INVALID_CONVERSION_ACTION (int): No conversion action of a supported ConversionActionType that matches the
      provided information can be found for the customer.
      CONVERSION_ALREADY_RETRACTED (int): A retraction was already reported for this conversion.
      CONVERSION_NOT_FOUND (int): A conversion for the supplied combination of conversion
      action and conversion identifier could not be found.
      CONVERSION_EXPIRED (int): The specified conversion has already expired. Conversions expire after 55
      days, after which adjustments cannot be reported against them.
      ADJUSTMENT_PRECEDES_CONVERSION (int): The supplied adjustment date time precedes that of the original
      conversion.
      MORE_RECENT_RESTATEMENT_FOUND (int): A restatement with a more recent adjustment date time was already
      reported for this conversion.
      TOO_RECENT_CONVERSION (int): The conversion was created too recently.
      CANNOT_RESTATE_CONVERSION_ACTION_THAT_ALWAYS_USES_DEFAULT_CONVERSION_VALUE (int): Restatements cannot be reported for a conversion action that always uses
      the default value.
      TOO_MANY_ADJUSTMENTS_IN_REQUEST (int): The request contained more than 2000 adjustments.
      TOO_MANY_ADJUSTMENTS (int): The conversion has been adjusted too many times.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    TOO_RECENT_CONVERSION_ACTION = 2
    INVALID_CONVERSION_ACTION = 3
    CONVERSION_ALREADY_RETRACTED = 4
    CONVERSION_NOT_FOUND = 5
    CONVERSION_EXPIRED = 6
    ADJUSTMENT_PRECEDES_CONVERSION = 7
    MORE_RECENT_RESTATEMENT_FOUND = 8
    TOO_RECENT_CONVERSION = 9
    CANNOT_RESTATE_CONVERSION_ACTION_THAT_ALWAYS_USES_DEFAULT_CONVERSION_VALUE = 10
    TOO_MANY_ADJUSTMENTS_IN_REQUEST = 11
    TOO_MANY_ADJUSTMENTS = 12
'''
ConversionAdjustmentUploadErrorEnum = ConversionAdjustmentUploadErrorEnum() # For __getattribute__
# Lazy holder for the ConversionAttributionEventType IntEnum source code; the
# base class (defined elsewhere) presumably builds the enum on first access.
class ConversionAttributionEventTypeEnum(_CreateEnumTypeUponFirstAccess):
    ConversionAttributionEventTypeEnum = '''\
class ConversionAttributionEventType(enum.IntEnum):
    """
    The event type of conversions that are attributed to.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Represents value unknown in this version.
      IMPRESSION (int): The conversion is attributed to an impression.
      INTERACTION (int): The conversion is attributed to an interaction.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    IMPRESSION = 2
    INTERACTION = 3
'''
ConversionAttributionEventTypeEnum = ConversionAttributionEventTypeEnum() # For __getattribute__
# Lazy holder for the ConversionLagBucket IntEnum source code (day buckets
# between impression and conversion); the base class (defined elsewhere)
# presumably builds the enum on first access.
class ConversionLagBucketEnum(_CreateEnumTypeUponFirstAccess):
    ConversionLagBucketEnum = '''\
class ConversionLagBucket(enum.IntEnum):
    """
    Enum representing the number of days between impression and conversion.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      LESS_THAN_ONE_DAY (int): Conversion lag bucket from 0 to 1 day. 0 day is included, 1 day is not.
      ONE_TO_TWO_DAYS (int): Conversion lag bucket from 1 to 2 days. 1 day is included, 2 days is not.
      TWO_TO_THREE_DAYS (int): Conversion lag bucket from 2 to 3 days. 2 days is included,
      3 days is not.
      THREE_TO_FOUR_DAYS (int): Conversion lag bucket from 3 to 4 days. 3 days is included,
      4 days is not.
      FOUR_TO_FIVE_DAYS (int): Conversion lag bucket from 4 to 5 days. 4 days is included,
      5 days is not.
      FIVE_TO_SIX_DAYS (int): Conversion lag bucket from 5 to 6 days. 5 days is included,
      6 days is not.
      SIX_TO_SEVEN_DAYS (int): Conversion lag bucket from 6 to 7 days. 6 days is included,
      7 days is not.
      SEVEN_TO_EIGHT_DAYS (int): Conversion lag bucket from 7 to 8 days. 7 days is included,
      8 days is not.
      EIGHT_TO_NINE_DAYS (int): Conversion lag bucket from 8 to 9 days. 8 days is included,
      9 days is not.
      NINE_TO_TEN_DAYS (int): Conversion lag bucket from 9 to 10 days. 9 days is included,
      10 days is not.
      TEN_TO_ELEVEN_DAYS (int): Conversion lag bucket from 10 to 11 days. 10 days is included,
      11 days is not.
      ELEVEN_TO_TWELVE_DAYS (int): Conversion lag bucket from 11 to 12 days. 11 days is included,
      12 days is not.
      TWELVE_TO_THIRTEEN_DAYS (int): Conversion lag bucket from 12 to 13 days. 12 days is included,
      13 days is not.
      THIRTEEN_TO_FOURTEEN_DAYS (int): Conversion lag bucket from 13 to 14 days. 13 days is included,
      14 days is not.
      FOURTEEN_TO_TWENTY_ONE_DAYS (int): Conversion lag bucket from 14 to 21 days. 14 days is included,
      21 days is not.
      TWENTY_ONE_TO_THIRTY_DAYS (int): Conversion lag bucket from 21 to 30 days. 21 days is included,
      30 days is not.
      THIRTY_TO_FORTY_FIVE_DAYS (int): Conversion lag bucket from 30 to 45 days. 30 days is included,
      45 days is not.
      FORTY_FIVE_TO_SIXTY_DAYS (int): Conversion lag bucket from 45 to 60 days. 45 days is included,
      60 days is not.
      SIXTY_TO_NINETY_DAYS (int): Conversion lag bucket from 60 to 90 days. 60 days is included,
      90 days is not.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LESS_THAN_ONE_DAY = 2
    ONE_TO_TWO_DAYS = 3
    TWO_TO_THREE_DAYS = 4
    THREE_TO_FOUR_DAYS = 5
    FOUR_TO_FIVE_DAYS = 6
    FIVE_TO_SIX_DAYS = 7
    SIX_TO_SEVEN_DAYS = 8
    SEVEN_TO_EIGHT_DAYS = 9
    EIGHT_TO_NINE_DAYS = 10
    NINE_TO_TEN_DAYS = 11
    TEN_TO_ELEVEN_DAYS = 12
    ELEVEN_TO_TWELVE_DAYS = 13
    TWELVE_TO_THIRTEEN_DAYS = 14
    THIRTEEN_TO_FOURTEEN_DAYS = 15
    FOURTEEN_TO_TWENTY_ONE_DAYS = 16
    TWENTY_ONE_TO_THIRTY_DAYS = 17
    THIRTY_TO_FORTY_FIVE_DAYS = 18
    FORTY_FIVE_TO_SIXTY_DAYS = 19
    SIXTY_TO_NINETY_DAYS = 20
'''
ConversionLagBucketEnum = ConversionLagBucketEnum() # For __getattribute__
# Lazy holder for the ConversionOrAdjustmentLagBucket IntEnum source code
# (day buckets for conversions and conversion adjustments); the base class
# (defined elsewhere) presumably builds the enum on first access.
class ConversionOrAdjustmentLagBucketEnum(_CreateEnumTypeUponFirstAccess):
    ConversionOrAdjustmentLagBucketEnum = '''\
class ConversionOrAdjustmentLagBucket(enum.IntEnum):
    """
    Enum representing the number of days between the impression and the
    conversion or between the impression and adjustments to the conversion.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      CONVERSION_LESS_THAN_ONE_DAY (int): Conversion lag bucket from 0 to 1 day. 0 day is included, 1 day is not.
      CONVERSION_ONE_TO_TWO_DAYS (int): Conversion lag bucket from 1 to 2 days. 1 day is included, 2 days is not.
      CONVERSION_TWO_TO_THREE_DAYS (int): Conversion lag bucket from 2 to 3 days. 2 days is included,
      3 days is not.
      CONVERSION_THREE_TO_FOUR_DAYS (int): Conversion lag bucket from 3 to 4 days. 3 days is included,
      4 days is not.
      CONVERSION_FOUR_TO_FIVE_DAYS (int): Conversion lag bucket from 4 to 5 days. 4 days is included,
      5 days is not.
      CONVERSION_FIVE_TO_SIX_DAYS (int): Conversion lag bucket from 5 to 6 days. 5 days is included,
      6 days is not.
      CONVERSION_SIX_TO_SEVEN_DAYS (int): Conversion lag bucket from 6 to 7 days. 6 days is included,
      7 days is not.
      CONVERSION_SEVEN_TO_EIGHT_DAYS (int): Conversion lag bucket from 7 to 8 days. 7 days is included,
      8 days is not.
      CONVERSION_EIGHT_TO_NINE_DAYS (int): Conversion lag bucket from 8 to 9 days. 8 days is included,
      9 days is not.
      CONVERSION_NINE_TO_TEN_DAYS (int): Conversion lag bucket from 9 to 10 days. 9 days is included,
      10 days is not.
      CONVERSION_TEN_TO_ELEVEN_DAYS (int): Conversion lag bucket from 10 to 11 days. 10 days is included,
      11 days is not.
      CONVERSION_ELEVEN_TO_TWELVE_DAYS (int): Conversion lag bucket from 11 to 12 days. 11 days is included,
      12 days is not.
      CONVERSION_TWELVE_TO_THIRTEEN_DAYS (int): Conversion lag bucket from 12 to 13 days. 12 days is included,
      13 days is not.
      CONVERSION_THIRTEEN_TO_FOURTEEN_DAYS (int): Conversion lag bucket from 13 to 14 days. 13 days is included,
      14 days is not.
      CONVERSION_FOURTEEN_TO_TWENTY_ONE_DAYS (int): Conversion lag bucket from 14 to 21 days. 14 days is included,
      21 days is not.
      CONVERSION_TWENTY_ONE_TO_THIRTY_DAYS (int): Conversion lag bucket from 21 to 30 days. 21 days is included,
      30 days is not.
      CONVERSION_THIRTY_TO_FORTY_FIVE_DAYS (int): Conversion lag bucket from 30 to 45 days. 30 days is included,
      45 days is not.
      CONVERSION_FORTY_FIVE_TO_SIXTY_DAYS (int): Conversion lag bucket from 45 to 60 days. 45 days is included,
      60 days is not.
      CONVERSION_SIXTY_TO_NINETY_DAYS (int): Conversion lag bucket from 60 to 90 days. 60 days is included,
      90 days is not.
      ADJUSTMENT_LESS_THAN_ONE_DAY (int): Conversion adjustment lag bucket from 0 to 1 day. 0 day is included,
      1 day is not.
      ADJUSTMENT_ONE_TO_TWO_DAYS (int): Conversion adjustment lag bucket from 1 to 2 days. 1 day is included,
      2 days is not.
      ADJUSTMENT_TWO_TO_THREE_DAYS (int): Conversion adjustment lag bucket from 2 to 3 days. 2 days is included,
      3 days is not.
      ADJUSTMENT_THREE_TO_FOUR_DAYS (int): Conversion adjustment lag bucket from 3 to 4 days. 3 days is included,
      4 days is not.
      ADJUSTMENT_FOUR_TO_FIVE_DAYS (int): Conversion adjustment lag bucket from 4 to 5 days. 4 days is included,
      5 days is not.
      ADJUSTMENT_FIVE_TO_SIX_DAYS (int): Conversion adjustment lag bucket from 5 to 6 days. 5 days is included,
      6 days is not.
      ADJUSTMENT_SIX_TO_SEVEN_DAYS (int): Conversion adjustment lag bucket from 6 to 7 days. 6 days is included,
      7 days is not.
      ADJUSTMENT_SEVEN_TO_EIGHT_DAYS (int): Conversion adjustment lag bucket from 7 to 8 days. 7 days is included,
      8 days is not.
      ADJUSTMENT_EIGHT_TO_NINE_DAYS (int): Conversion adjustment lag bucket from 8 to 9 days. 8 days is included,
      9 days is not.
      ADJUSTMENT_NINE_TO_TEN_DAYS (int): Conversion adjustment lag bucket from 9 to 10 days. 9 days is included,
      10 days is not.
      ADJUSTMENT_TEN_TO_ELEVEN_DAYS (int): Conversion adjustment lag bucket from 10 to 11 days. 10 days is included,
      11 days is not.
      ADJUSTMENT_ELEVEN_TO_TWELVE_DAYS (int): Conversion adjustment lag bucket from 11 to 12 days. 11 days is included,
      12 days is not.
      ADJUSTMENT_TWELVE_TO_THIRTEEN_DAYS (int): Conversion adjustment lag bucket from 12 to 13 days. 12 days is included,
      13 days is not.
      ADJUSTMENT_THIRTEEN_TO_FOURTEEN_DAYS (int): Conversion adjustment lag bucket from 13 to 14 days. 13 days is included,
      14 days is not.
      ADJUSTMENT_FOURTEEN_TO_TWENTY_ONE_DAYS (int): Conversion adjustment lag bucket from 14 to 21 days. 14 days is included,
      21 days is not.
      ADJUSTMENT_TWENTY_ONE_TO_THIRTY_DAYS (int): Conversion adjustment lag bucket from 21 to 30 days. 21 days is included,
      30 days is not.
      ADJUSTMENT_THIRTY_TO_FORTY_FIVE_DAYS (int): Conversion adjustment lag bucket from 30 to 45 days. 30 days is included,
      45 days is not.
      ADJUSTMENT_FORTY_FIVE_TO_SIXTY_DAYS (int): Conversion adjustment lag bucket from 45 to 60 days. 45 days is included,
      60 days is not.
      ADJUSTMENT_SIXTY_TO_NINETY_DAYS (int): Conversion adjustment lag bucket from 60 to 90 days. 60 days is included,
      90 days is not.
      ADJUSTMENT_NINETY_TO_ONE_HUNDRED_AND_FORTY_FIVE_DAYS (int): Conversion adjustment lag bucket from 90 to 145 days. 90 days is
      included, 145 days is not.
      CONVERSION_UNKNOWN (int): Conversion lag bucket UNKNOWN. This is for dates before conversion lag
      bucket was available in Google Ads.
      ADJUSTMENT_UNKNOWN (int): Conversion adjustment lag bucket UNKNOWN. This is for dates before
      conversion adjustment lag bucket was available in Google Ads.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CONVERSION_LESS_THAN_ONE_DAY = 2
    CONVERSION_ONE_TO_TWO_DAYS = 3
    CONVERSION_TWO_TO_THREE_DAYS = 4
    CONVERSION_THREE_TO_FOUR_DAYS = 5
    CONVERSION_FOUR_TO_FIVE_DAYS = 6
    CONVERSION_FIVE_TO_SIX_DAYS = 7
    CONVERSION_SIX_TO_SEVEN_DAYS = 8
    CONVERSION_SEVEN_TO_EIGHT_DAYS = 9
    CONVERSION_EIGHT_TO_NINE_DAYS = 10
    CONVERSION_NINE_TO_TEN_DAYS = 11
    CONVERSION_TEN_TO_ELEVEN_DAYS = 12
    CONVERSION_ELEVEN_TO_TWELVE_DAYS = 13
    CONVERSION_TWELVE_TO_THIRTEEN_DAYS = 14
    CONVERSION_THIRTEEN_TO_FOURTEEN_DAYS = 15
    CONVERSION_FOURTEEN_TO_TWENTY_ONE_DAYS = 16
    CONVERSION_TWENTY_ONE_TO_THIRTY_DAYS = 17
    CONVERSION_THIRTY_TO_FORTY_FIVE_DAYS = 18
    CONVERSION_FORTY_FIVE_TO_SIXTY_DAYS = 19
    CONVERSION_SIXTY_TO_NINETY_DAYS = 20
    ADJUSTMENT_LESS_THAN_ONE_DAY = 21
    ADJUSTMENT_ONE_TO_TWO_DAYS = 22
    ADJUSTMENT_TWO_TO_THREE_DAYS = 23
    ADJUSTMENT_THREE_TO_FOUR_DAYS = 24
    ADJUSTMENT_FOUR_TO_FIVE_DAYS = 25
    ADJUSTMENT_FIVE_TO_SIX_DAYS = 26
    ADJUSTMENT_SIX_TO_SEVEN_DAYS = 27
    ADJUSTMENT_SEVEN_TO_EIGHT_DAYS = 28
    ADJUSTMENT_EIGHT_TO_NINE_DAYS = 29
    ADJUSTMENT_NINE_TO_TEN_DAYS = 30
    ADJUSTMENT_TEN_TO_ELEVEN_DAYS = 31
    ADJUSTMENT_ELEVEN_TO_TWELVE_DAYS = 32
    ADJUSTMENT_TWELVE_TO_THIRTEEN_DAYS = 33
    ADJUSTMENT_THIRTEEN_TO_FOURTEEN_DAYS = 34
    ADJUSTMENT_FOURTEEN_TO_TWENTY_ONE_DAYS = 35
    ADJUSTMENT_TWENTY_ONE_TO_THIRTY_DAYS = 36
    ADJUSTMENT_THIRTY_TO_FORTY_FIVE_DAYS = 37
    ADJUSTMENT_FORTY_FIVE_TO_SIXTY_DAYS = 38
    ADJUSTMENT_SIXTY_TO_NINETY_DAYS = 39
    ADJUSTMENT_NINETY_TO_ONE_HUNDRED_AND_FORTY_FIVE_DAYS = 40
    CONVERSION_UNKNOWN = 41
    ADJUSTMENT_UNKNOWN = 42
'''
ConversionOrAdjustmentLagBucketEnum = ConversionOrAdjustmentLagBucketEnum() # For __getattribute__
# Lazy holder for the ConversionUploadError IntEnum source code; the base
# class (defined elsewhere) presumably builds the enum on first access.
class ConversionUploadErrorEnum(_CreateEnumTypeUponFirstAccess):
    ConversionUploadErrorEnum = '''\
class ConversionUploadError(enum.IntEnum):
    """
    Enum describing possible conversion upload errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      TOO_MANY_CONVERSIONS_IN_REQUEST (int): The request contained more than 2000 conversions.
      UNPARSEABLE_GCLID (int): The specified gclid could not be decoded.
      CONVERSION_PRECEDES_GCLID (int): The specified conversion\_date\_time is before the event time associated
      with the given gclid.
      EXPIRED_GCLID (int): The click associated with the given gclid is either too old to be
      imported or occurred outside of the click through lookback window for the
      specified conversion action.
      TOO_RECENT_GCLID (int): The click associated with the given gclid occurred too recently. Please
      try uploading again after 6 hours have passed since the click occurred.
      GCLID_NOT_FOUND (int): The click associated with the given gclid could not be found in the
      system. This can happen if Google Click IDs are collected for non Google
      Ads clicks.
      UNAUTHORIZED_CUSTOMER (int): The click associated with the given gclid is owned by a customer
      account that the uploading customer does not manage.
      INVALID_CONVERSION_ACTION (int): No upload eligible conversion action that matches the provided
      information can be found for the customer.
      TOO_RECENT_CONVERSION_ACTION (int): The specified conversion action was created too recently.
      Please try the upload again after 4-6 hours have passed since the
      conversion action was created.
      CONVERSION_TRACKING_NOT_ENABLED_AT_IMPRESSION_TIME (int): The click associated with the given gclid does not contain conversion
      tracking information.
      EXTERNAL_ATTRIBUTION_DATA_SET_FOR_NON_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION (int): The specified conversion action does not use an external attribution
      model, but external\_attribution\_data was set.
      EXTERNAL_ATTRIBUTION_DATA_NOT_SET_FOR_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION (int): The specified conversion action uses an external attribution model, but
      external\_attribution\_data or one of its contained fields was not set.
      Both external\_attribution\_credit and external\_attribution\_model must
      be set for externally attributed conversion actions.
      ORDER_ID_NOT_PERMITTED_FOR_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION (int): Order IDs are not supported for conversion actions which use an external
      attribution model.
      ORDER_ID_ALREADY_IN_USE (int): A conversion with the same order id and conversion action combination
      already exists in our system.
      DUPLICATE_ORDER_ID (int): The request contained two or more conversions with the same order id and
      conversion action combination.
      TOO_RECENT_CALL (int): The call occurred too recently. Please try uploading again after 6 hours
      have passed since the call occurred.
      EXPIRED_CALL (int): The click that initiated the call is too old for this conversion to be
      imported.
      CALL_NOT_FOUND (int): The call or the click leading to the call was not found.
      CONVERSION_PRECEDES_CALL (int): The specified conversion\_date\_time is before the
      call\_start\_date\_time.
      CONVERSION_TRACKING_NOT_ENABLED_AT_CALL_TIME (int): The click associated with the call does not contain conversion tracking
      information.
      UNPARSEABLE_CALLERS_PHONE_NUMBER (int): The caller’s phone number cannot be parsed. It should be formatted either
      as E.164 "+16502531234", International "+64 3-331 6005" or US national
      number "6502531234".
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    TOO_MANY_CONVERSIONS_IN_REQUEST = 2
    UNPARSEABLE_GCLID = 3
    CONVERSION_PRECEDES_GCLID = 4
    EXPIRED_GCLID = 5
    TOO_RECENT_GCLID = 6
    GCLID_NOT_FOUND = 7
    UNAUTHORIZED_CUSTOMER = 8
    INVALID_CONVERSION_ACTION = 9
    TOO_RECENT_CONVERSION_ACTION = 10
    CONVERSION_TRACKING_NOT_ENABLED_AT_IMPRESSION_TIME = 11
    EXTERNAL_ATTRIBUTION_DATA_SET_FOR_NON_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION = 12
    EXTERNAL_ATTRIBUTION_DATA_NOT_SET_FOR_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION = 13
    ORDER_ID_NOT_PERMITTED_FOR_EXTERNALLY_ATTRIBUTED_CONVERSION_ACTION = 14
    ORDER_ID_ALREADY_IN_USE = 15
    DUPLICATE_ORDER_ID = 16
    TOO_RECENT_CALL = 17
    EXPIRED_CALL = 18
    CALL_NOT_FOUND = 19
    CONVERSION_PRECEDES_CALL = 20
    CONVERSION_TRACKING_NOT_ENABLED_AT_CALL_TIME = 21
    UNPARSEABLE_CALLERS_PHONE_NUMBER = 22
'''
ConversionUploadErrorEnum = ConversionUploadErrorEnum() # For __getattribute__
class CountryCodeErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CountryCodeError enum. The base class presumably
    # execs this string to build the real enum.IntEnum on first attribute
    # access (lazy creation) -- confirm against _CreateEnumTypeUponFirstAccess.
    CountryCodeErrorEnum = '''\
class CountryCodeError(enum.IntEnum):
    """
    Enum describing country code errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      INVALID_COUNTRY_CODE (int): The country code is invalid.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_COUNTRY_CODE = 2
'''
CountryCodeErrorEnum = CountryCodeErrorEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CriterionCategoryChannelAvailabilityModeEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CriterionCategoryChannelAvailabilityMode enum. The
    # base class presumably execs this string to build the real enum.IntEnum on
    # first attribute access (lazy creation).
    CriterionCategoryChannelAvailabilityModeEnum = '''\
class CriterionCategoryChannelAvailabilityMode(enum.IntEnum):
    """
    Enum containing the possible CriterionCategoryChannelAvailabilityMode.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ALL_CHANNELS (int): The category is available to campaigns of all channel types and subtypes.
      CHANNEL_TYPE_AND_ALL_SUBTYPES (int): The category is available to campaigns of a specific channel type,
      including all subtypes under it.
      CHANNEL_TYPE_AND_SUBSET_SUBTYPES (int): The category is available to campaigns of a specific channel type and
      subtype(s).
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ALL_CHANNELS = 2
    CHANNEL_TYPE_AND_ALL_SUBTYPES = 3
    CHANNEL_TYPE_AND_SUBSET_SUBTYPES = 4
'''
CriterionCategoryChannelAvailabilityModeEnum = CriterionCategoryChannelAvailabilityModeEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CriterionCategoryLocaleAvailabilityModeEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CriterionCategoryLocaleAvailabilityMode enum. The
    # base class presumably execs this string to build the real enum.IntEnum on
    # first attribute access (lazy creation).
    CriterionCategoryLocaleAvailabilityModeEnum = '''\
class CriterionCategoryLocaleAvailabilityMode(enum.IntEnum):
    """
    Enum containing the possible CriterionCategoryLocaleAvailabilityMode.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ALL_LOCALES (int): The category is available to campaigns of all locales.
      COUNTRY_AND_ALL_LANGUAGES (int): The category is available to campaigns within a list of countries,
      regardless of language.
      LANGUAGE_AND_ALL_COUNTRIES (int): The category is available to campaigns within a list of languages,
      regardless of country.
      COUNTRY_AND_LANGUAGE (int): The category is available to campaigns within a list of country, language
      pairs.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ALL_LOCALES = 2
    COUNTRY_AND_ALL_LANGUAGES = 3
    LANGUAGE_AND_ALL_COUNTRIES = 4
    COUNTRY_AND_LANGUAGE = 5
'''
CriterionCategoryLocaleAvailabilityModeEnum = CriterionCategoryLocaleAvailabilityModeEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CriterionErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CriterionError enum. The base class presumably execs
    # this string to build the real enum.IntEnum on first attribute access
    # (lazy creation). NOTE: the numeric values are NOT sequential (e.g. 96/97
    # appear mid-list) -- they mirror the upstream API proto and must not be
    # "fixed" or reordered.
    CriterionErrorEnum = '''\
class CriterionError(enum.IntEnum):
    """
    Enum describing possible criterion errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      CONCRETE_TYPE_REQUIRED (int): Concrete type of criterion is required for CREATE and UPDATE operations.
      INVALID_EXCLUDED_CATEGORY (int): The category requested for exclusion is invalid.
      INVALID_KEYWORD_TEXT (int): Invalid keyword criteria text.
      KEYWORD_TEXT_TOO_LONG (int): Keyword text should be less than 80 chars.
      KEYWORD_HAS_TOO_MANY_WORDS (int): Keyword text has too many words.
      KEYWORD_HAS_INVALID_CHARS (int): Keyword text has invalid characters or symbols.
      INVALID_PLACEMENT_URL (int): Invalid placement URL.
      INVALID_USER_LIST (int): Invalid user list criterion.
      INVALID_USER_INTEREST (int): Invalid user interest criterion.
      INVALID_FORMAT_FOR_PLACEMENT_URL (int): Placement URL has wrong format.
      PLACEMENT_URL_IS_TOO_LONG (int): Placement URL is too long.
      PLACEMENT_URL_HAS_ILLEGAL_CHAR (int): Indicates the URL contains an illegal character.
      PLACEMENT_URL_HAS_MULTIPLE_SITES_IN_LINE (int): Indicates the URL contains multiple comma separated URLs.
      PLACEMENT_IS_NOT_AVAILABLE_FOR_TARGETING_OR_EXCLUSION (int): Indicates the domain is blacklisted.
      INVALID_TOPIC_PATH (int): Invalid topic path.
      INVALID_YOUTUBE_CHANNEL_ID (int): The YouTube Channel Id is invalid.
      INVALID_YOUTUBE_VIDEO_ID (int): The YouTube Video Id is invalid.
      YOUTUBE_VERTICAL_CHANNEL_DEPRECATED (int): Indicates the placement is a YouTube vertical channel, which is no longer
      supported.
      YOUTUBE_DEMOGRAPHIC_CHANNEL_DEPRECATED (int): Indicates the placement is a YouTube demographic channel, which is no
      longer supported.
      YOUTUBE_URL_UNSUPPORTED (int): YouTube urls are not supported in Placement criterion. Use YouTubeChannel
      and YouTubeVideo criterion instead.
      CANNOT_EXCLUDE_CRITERIA_TYPE (int): Criteria type can not be excluded by the customer, like AOL account type
      cannot target site type criteria.
      CANNOT_ADD_CRITERIA_TYPE (int): Criteria type can not be targeted.
      INVALID_PRODUCT_FILTER (int): Product filter in the product criteria has invalid characters. Operand
      and the argument in the filter can not have "==" or "&+".
      PRODUCT_FILTER_TOO_LONG (int): Product filter in the product criteria is translated to a string as
      operand1==argument1&+operand2==argument2, maximum allowed length for the
      string is 255 chars.
      CANNOT_EXCLUDE_SIMILAR_USER_LIST (int): Not allowed to exclude similar user list.
      CANNOT_ADD_CLOSED_USER_LIST (int): Not allowed to target a closed user list.
      CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SEARCH_ONLY_CAMPAIGNS (int): Not allowed to add display only UserLists to search only campaigns.
      CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SEARCH_CAMPAIGNS (int): Not allowed to add display only UserLists to search plus campaigns.
      CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SHOPPING_CAMPAIGNS (int): Not allowed to add display only UserLists to shopping campaigns.
      CANNOT_ADD_USER_INTERESTS_TO_SEARCH_CAMPAIGNS (int): Not allowed to add User interests to search only campaigns.
      CANNOT_SET_BIDS_ON_CRITERION_TYPE_IN_SEARCH_CAMPAIGNS (int): Not allowed to set bids for this criterion type in search campaigns
      CANNOT_ADD_URLS_TO_CRITERION_TYPE_FOR_CAMPAIGN_TYPE (int): Final URLs, URL Templates and CustomParameters cannot be set for the
      criterion types of Gender, AgeRange, UserList, Placement, MobileApp, and
      MobileAppCategory in search campaigns and shopping campaigns.
      INVALID_CUSTOM_AFFINITY (int): Invalid custom affinity criterion.
      INVALID_CUSTOM_INTENT (int): Invalid custom intent criterion.
      INVALID_IP_ADDRESS (int): IP address is not valid.
      INVALID_IP_FORMAT (int): IP format is not valid.
      INVALID_MOBILE_APP (int): Mobile application is not valid.
      INVALID_MOBILE_APP_CATEGORY (int): Mobile application category is not valid.
      INVALID_CRITERION_ID (int): The CriterionId does not exist or is of the incorrect type.
      CANNOT_TARGET_CRITERION (int): The Criterion is not allowed to be targeted.
      CANNOT_TARGET_OBSOLETE_CRITERION (int): The criterion is not allowed to be targeted as it is deprecated.
      CRITERION_ID_AND_TYPE_MISMATCH (int): The CriterionId is not valid for the type.
      INVALID_PROXIMITY_RADIUS (int): Distance for the radius for the proximity criterion is invalid.
      INVALID_PROXIMITY_RADIUS_UNITS (int): Units for the distance for the radius for the proximity criterion is
      invalid.
      INVALID_STREETADDRESS_LENGTH (int): Street address in the address is not valid.
      INVALID_CITYNAME_LENGTH (int): City name in the address is not valid.
      INVALID_REGIONCODE_LENGTH (int): Region code in the address is not valid.
      INVALID_REGIONNAME_LENGTH (int): Region name in the address is not valid.
      INVALID_POSTALCODE_LENGTH (int): Postal code in the address is not valid.
      INVALID_COUNTRY_CODE (int): Country code in the address is not valid.
      INVALID_LATITUDE (int): Latitude for the GeoPoint is not valid.
      INVALID_LONGITUDE (int): Longitude for the GeoPoint is not valid.
      PROXIMITY_GEOPOINT_AND_ADDRESS_BOTH_CANNOT_BE_NULL (int): The Proximity input is not valid. Both address and geoPoint cannot be
      null.
      INVALID_PROXIMITY_ADDRESS (int): The Proximity address cannot be geocoded to a valid lat/long.
      INVALID_USER_DOMAIN_NAME (int): User domain name is not valid.
      CRITERION_PARAMETER_TOO_LONG (int): Length of serialized criterion parameter exceeded size limit.
      AD_SCHEDULE_TIME_INTERVALS_OVERLAP (int): Time interval in the AdSchedule overlaps with another AdSchedule.
      AD_SCHEDULE_INTERVAL_CANNOT_SPAN_MULTIPLE_DAYS (int): AdSchedule time interval cannot span multiple days.
      AD_SCHEDULE_INVALID_TIME_INTERVAL (int): AdSchedule time interval specified is invalid, endTime cannot be earlier
      than startTime.
      AD_SCHEDULE_EXCEEDED_INTERVALS_PER_DAY_LIMIT (int): The number of AdSchedule entries in a day exceeds the limit.
      AD_SCHEDULE_CRITERION_ID_MISMATCHING_FIELDS (int): CriteriaId does not match the interval of the AdSchedule specified.
      CANNOT_BID_MODIFY_CRITERION_TYPE (int): Cannot set bid modifier for this criterion type.
      CANNOT_BID_MODIFY_CRITERION_CAMPAIGN_OPTED_OUT (int): Cannot bid modify criterion, since it is opted out of the campaign.
      CANNOT_BID_MODIFY_NEGATIVE_CRITERION (int): Cannot set bid modifier for a negative criterion.
      BID_MODIFIER_ALREADY_EXISTS (int): Bid Modifier already exists. Use SET operation to update.
      FEED_ID_NOT_ALLOWED (int): Feed Id is not allowed in these Location Groups.
      ACCOUNT_INELIGIBLE_FOR_CRITERIA_TYPE (int): The account may not use the requested criteria type. For example, some
      accounts are restricted to keywords only.
      CRITERIA_TYPE_INVALID_FOR_BIDDING_STRATEGY (int): The requested criteria type cannot be used with campaign or ad group
      bidding strategy.
      CANNOT_EXCLUDE_CRITERION (int): The Criterion is not allowed to be excluded.
      CANNOT_REMOVE_CRITERION (int): The criterion is not allowed to be removed. For example, we cannot remove
      any of the device criterion.
      PRODUCT_SCOPE_TOO_LONG (int): The combined length of product dimension values of the product scope
      criterion is too long.
      PRODUCT_SCOPE_TOO_MANY_DIMENSIONS (int): Product scope contains too many dimensions.
      PRODUCT_PARTITION_TOO_LONG (int): The combined length of product dimension values of the product partition
      criterion is too long.
      PRODUCT_PARTITION_TOO_MANY_DIMENSIONS (int): Product partition contains too many dimensions.
      INVALID_PRODUCT_DIMENSION (int): The product dimension is invalid (e.g. dimension contains illegal value,
      dimension type is represented with wrong class, etc). Product dimension
      value can not contain "==" or "&+".
      INVALID_PRODUCT_DIMENSION_TYPE (int): Product dimension type is either invalid for campaigns of this type or
      cannot be used in the current context. BIDDING\_CATEGORY\_Lx and
      PRODUCT\_TYPE\_Lx product dimensions must be used in ascending order of
      their levels: L1, L2, L3, L4, L5... The levels must be specified
      sequentially and start from L1. Furthermore, an "others" product
      partition cannot be subdivided with a dimension of the same type but of
      a higher level ("others" BIDDING\_CATEGORY\_L3 can be subdivided with
      BRAND but not with BIDDING\_CATEGORY\_L4).
      INVALID_PRODUCT_BIDDING_CATEGORY (int): Bidding categories do not form a valid path in the Shopping bidding
      category taxonomy.
      MISSING_SHOPPING_SETTING (int): ShoppingSetting must be added to the campaign before ProductScope
      criteria can be added.
      INVALID_MATCHING_FUNCTION (int): Matching function is invalid.
      LOCATION_FILTER_NOT_ALLOWED (int): Filter parameters not allowed for location groups targeting.
      INVALID_FEED_FOR_LOCATION_FILTER (int): Feed not found, or the feed is not an enabled location feed.
      LOCATION_FILTER_INVALID (int): Given location filter parameter is invalid for location groups targeting.
      CANNOT_ATTACH_CRITERIA_AT_CAMPAIGN_AND_ADGROUP (int): Criteria type cannot be associated with a campaign and its ad group(s)
      simultaneously.
      HOTEL_LENGTH_OF_STAY_OVERLAPS_WITH_EXISTING_CRITERION (int): Range represented by hotel length of stay's min nights and max nights
      overlaps with an existing criterion.
      HOTEL_ADVANCE_BOOKING_WINDOW_OVERLAPS_WITH_EXISTING_CRITERION (int): Range represented by hotel advance booking window's min days and max days
      overlaps with an existing criterion.
      FIELD_INCOMPATIBLE_WITH_NEGATIVE_TARGETING (int): The field is not allowed to be set when the negative field is set to
      true, e.g. we don't allow bids in negative ad group or campaign criteria.
      INVALID_WEBPAGE_CONDITION (int): The combination of operand and operator in webpage condition is invalid.
      INVALID_WEBPAGE_CONDITION_URL (int): The URL of webpage condition is invalid.
      WEBPAGE_CONDITION_URL_CANNOT_BE_EMPTY (int): The URL of webpage condition cannot be empty or contain white space.
      WEBPAGE_CONDITION_URL_UNSUPPORTED_PROTOCOL (int): The URL of webpage condition contains an unsupported protocol.
      WEBPAGE_CONDITION_URL_CANNOT_BE_IP_ADDRESS (int): The URL of webpage condition cannot be an IP address.
      WEBPAGE_CONDITION_URL_DOMAIN_NOT_CONSISTENT_WITH_CAMPAIGN_SETTING (int): The domain of the URL is not consistent with the domain in campaign
      setting.
      WEBPAGE_CONDITION_URL_CANNOT_BE_PUBLIC_SUFFIX (int): The URL of webpage condition cannot be a public suffix itself.
      WEBPAGE_CONDITION_URL_INVALID_PUBLIC_SUFFIX (int): The URL of webpage condition has an invalid public suffix.
      WEBPAGE_CONDITION_URL_VALUE_TRACK_VALUE_NOT_SUPPORTED (int): Value track parameter is not supported in webpage condition URL.
      WEBPAGE_CRITERION_URL_EQUALS_CAN_HAVE_ONLY_ONE_CONDITION (int): Only one URL-EQUALS webpage condition is allowed in a webpage
      criterion and it cannot be combined with other conditions.
      WEBPAGE_CRITERION_NOT_SUPPORTED_ON_NON_DSA_AD_GROUP (int): A webpage criterion cannot be added to a non-DSA ad group.
      CANNOT_TARGET_USER_LIST_FOR_SMART_DISPLAY_CAMPAIGNS (int): Cannot add positive user list criteria in Smart Display campaigns.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CONCRETE_TYPE_REQUIRED = 2
    INVALID_EXCLUDED_CATEGORY = 3
    INVALID_KEYWORD_TEXT = 4
    KEYWORD_TEXT_TOO_LONG = 5
    KEYWORD_HAS_TOO_MANY_WORDS = 6
    KEYWORD_HAS_INVALID_CHARS = 7
    INVALID_PLACEMENT_URL = 8
    INVALID_USER_LIST = 9
    INVALID_USER_INTEREST = 10
    INVALID_FORMAT_FOR_PLACEMENT_URL = 11
    PLACEMENT_URL_IS_TOO_LONG = 12
    PLACEMENT_URL_HAS_ILLEGAL_CHAR = 13
    PLACEMENT_URL_HAS_MULTIPLE_SITES_IN_LINE = 14
    PLACEMENT_IS_NOT_AVAILABLE_FOR_TARGETING_OR_EXCLUSION = 15
    INVALID_TOPIC_PATH = 16
    INVALID_YOUTUBE_CHANNEL_ID = 17
    INVALID_YOUTUBE_VIDEO_ID = 18
    YOUTUBE_VERTICAL_CHANNEL_DEPRECATED = 19
    YOUTUBE_DEMOGRAPHIC_CHANNEL_DEPRECATED = 20
    YOUTUBE_URL_UNSUPPORTED = 21
    CANNOT_EXCLUDE_CRITERIA_TYPE = 22
    CANNOT_ADD_CRITERIA_TYPE = 23
    INVALID_PRODUCT_FILTER = 24
    PRODUCT_FILTER_TOO_LONG = 25
    CANNOT_EXCLUDE_SIMILAR_USER_LIST = 26
    CANNOT_ADD_CLOSED_USER_LIST = 27
    CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SEARCH_ONLY_CAMPAIGNS = 28
    CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SEARCH_CAMPAIGNS = 29
    CANNOT_ADD_DISPLAY_ONLY_LISTS_TO_SHOPPING_CAMPAIGNS = 30
    CANNOT_ADD_USER_INTERESTS_TO_SEARCH_CAMPAIGNS = 31
    CANNOT_SET_BIDS_ON_CRITERION_TYPE_IN_SEARCH_CAMPAIGNS = 32
    CANNOT_ADD_URLS_TO_CRITERION_TYPE_FOR_CAMPAIGN_TYPE = 33
    INVALID_CUSTOM_AFFINITY = 96
    INVALID_CUSTOM_INTENT = 97
    INVALID_IP_ADDRESS = 34
    INVALID_IP_FORMAT = 35
    INVALID_MOBILE_APP = 36
    INVALID_MOBILE_APP_CATEGORY = 37
    INVALID_CRITERION_ID = 38
    CANNOT_TARGET_CRITERION = 39
    CANNOT_TARGET_OBSOLETE_CRITERION = 40
    CRITERION_ID_AND_TYPE_MISMATCH = 41
    INVALID_PROXIMITY_RADIUS = 42
    INVALID_PROXIMITY_RADIUS_UNITS = 43
    INVALID_STREETADDRESS_LENGTH = 44
    INVALID_CITYNAME_LENGTH = 45
    INVALID_REGIONCODE_LENGTH = 46
    INVALID_REGIONNAME_LENGTH = 47
    INVALID_POSTALCODE_LENGTH = 48
    INVALID_COUNTRY_CODE = 49
    INVALID_LATITUDE = 50
    INVALID_LONGITUDE = 51
    PROXIMITY_GEOPOINT_AND_ADDRESS_BOTH_CANNOT_BE_NULL = 52
    INVALID_PROXIMITY_ADDRESS = 53
    INVALID_USER_DOMAIN_NAME = 54
    CRITERION_PARAMETER_TOO_LONG = 55
    AD_SCHEDULE_TIME_INTERVALS_OVERLAP = 56
    AD_SCHEDULE_INTERVAL_CANNOT_SPAN_MULTIPLE_DAYS = 57
    AD_SCHEDULE_INVALID_TIME_INTERVAL = 58
    AD_SCHEDULE_EXCEEDED_INTERVALS_PER_DAY_LIMIT = 59
    AD_SCHEDULE_CRITERION_ID_MISMATCHING_FIELDS = 60
    CANNOT_BID_MODIFY_CRITERION_TYPE = 61
    CANNOT_BID_MODIFY_CRITERION_CAMPAIGN_OPTED_OUT = 62
    CANNOT_BID_MODIFY_NEGATIVE_CRITERION = 63
    BID_MODIFIER_ALREADY_EXISTS = 64
    FEED_ID_NOT_ALLOWED = 65
    ACCOUNT_INELIGIBLE_FOR_CRITERIA_TYPE = 66
    CRITERIA_TYPE_INVALID_FOR_BIDDING_STRATEGY = 67
    CANNOT_EXCLUDE_CRITERION = 68
    CANNOT_REMOVE_CRITERION = 69
    PRODUCT_SCOPE_TOO_LONG = 70
    PRODUCT_SCOPE_TOO_MANY_DIMENSIONS = 71
    PRODUCT_PARTITION_TOO_LONG = 72
    PRODUCT_PARTITION_TOO_MANY_DIMENSIONS = 73
    INVALID_PRODUCT_DIMENSION = 74
    INVALID_PRODUCT_DIMENSION_TYPE = 75
    INVALID_PRODUCT_BIDDING_CATEGORY = 76
    MISSING_SHOPPING_SETTING = 77
    INVALID_MATCHING_FUNCTION = 78
    LOCATION_FILTER_NOT_ALLOWED = 79
    INVALID_FEED_FOR_LOCATION_FILTER = 98
    LOCATION_FILTER_INVALID = 80
    CANNOT_ATTACH_CRITERIA_AT_CAMPAIGN_AND_ADGROUP = 81
    HOTEL_LENGTH_OF_STAY_OVERLAPS_WITH_EXISTING_CRITERION = 82
    HOTEL_ADVANCE_BOOKING_WINDOW_OVERLAPS_WITH_EXISTING_CRITERION = 83
    FIELD_INCOMPATIBLE_WITH_NEGATIVE_TARGETING = 84
    INVALID_WEBPAGE_CONDITION = 85
    INVALID_WEBPAGE_CONDITION_URL = 86
    WEBPAGE_CONDITION_URL_CANNOT_BE_EMPTY = 87
    WEBPAGE_CONDITION_URL_UNSUPPORTED_PROTOCOL = 88
    WEBPAGE_CONDITION_URL_CANNOT_BE_IP_ADDRESS = 89
    WEBPAGE_CONDITION_URL_DOMAIN_NOT_CONSISTENT_WITH_CAMPAIGN_SETTING = 90
    WEBPAGE_CONDITION_URL_CANNOT_BE_PUBLIC_SUFFIX = 91
    WEBPAGE_CONDITION_URL_INVALID_PUBLIC_SUFFIX = 92
    WEBPAGE_CONDITION_URL_VALUE_TRACK_VALUE_NOT_SUPPORTED = 93
    WEBPAGE_CRITERION_URL_EQUALS_CAN_HAVE_ONLY_ONE_CONDITION = 94
    WEBPAGE_CRITERION_NOT_SUPPORTED_ON_NON_DSA_AD_GROUP = 95
    CANNOT_TARGET_USER_LIST_FOR_SMART_DISPLAY_CAMPAIGNS = 99
'''
CriterionErrorEnum = CriterionErrorEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CriterionSystemServingStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CriterionSystemServingStatus enum. The base class
    # presumably execs this string to build the real enum.IntEnum on first
    # attribute access (lazy creation).
    CriterionSystemServingStatusEnum = '''\
class CriterionSystemServingStatus(enum.IntEnum):
    """
    Enumerates criterion system serving statuses.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      ELIGIBLE (int): Eligible.
      RARELY_SERVED (int): Low search volume.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ELIGIBLE = 2
    RARELY_SERVED = 3
'''
CriterionSystemServingStatusEnum = CriterionSystemServingStatusEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CriterionTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CriterionType enum. The base class presumably execs
    # this string to build the real enum.IntEnum on first attribute access
    # (lazy creation).
    CriterionTypeEnum = '''\
class CriterionType(enum.IntEnum):
    """
    Enum describing possible criterion types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      KEYWORD (int): Keyword. e.g. 'mars cruise'.
      PLACEMENT (int): Placement, aka Website. e.g. 'www.flowers4sale.com'
      MOBILE_APP_CATEGORY (int): Mobile application categories to target.
      MOBILE_APPLICATION (int): Mobile applications to target.
      DEVICE (int): Devices to target.
      LOCATION (int): Locations to target.
      LISTING_GROUP (int): Listing groups to target.
      AD_SCHEDULE (int): Ad Schedule.
      AGE_RANGE (int): Age range.
      GENDER (int): Gender.
      INCOME_RANGE (int): Income Range.
      PARENTAL_STATUS (int): Parental status.
      YOUTUBE_VIDEO (int): YouTube Video.
      YOUTUBE_CHANNEL (int): YouTube Channel.
      USER_LIST (int): User list.
      PROXIMITY (int): Proximity.
      TOPIC (int): A topic target on the display network (e.g. "Pets & Animals").
      LISTING_SCOPE (int): Listing scope to target.
      LANGUAGE (int): Language.
      IP_BLOCK (int): IpBlock.
      CONTENT_LABEL (int): Content Label for category exclusion.
      CARRIER (int): Carrier.
      USER_INTEREST (int): A category the user is interested in.
      WEBPAGE (int): Webpage criterion for dynamic search ads.
      OPERATING_SYSTEM_VERSION (int): Operating system version.
      APP_PAYMENT_MODEL (int): App payment model.
      MOBILE_DEVICE (int): Mobile device.
      CUSTOM_AFFINITY (int): Custom affinity.
      CUSTOM_INTENT (int): Custom intent.
      LOCATION_GROUP (int): Location group.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    KEYWORD = 2
    PLACEMENT = 3
    MOBILE_APP_CATEGORY = 4
    MOBILE_APPLICATION = 5
    DEVICE = 6
    LOCATION = 7
    LISTING_GROUP = 8
    AD_SCHEDULE = 9
    AGE_RANGE = 10
    GENDER = 11
    INCOME_RANGE = 12
    PARENTAL_STATUS = 13
    YOUTUBE_VIDEO = 14
    YOUTUBE_CHANNEL = 15
    USER_LIST = 16
    PROXIMITY = 17
    TOPIC = 18
    LISTING_SCOPE = 19
    LANGUAGE = 20
    IP_BLOCK = 21
    CONTENT_LABEL = 22
    CARRIER = 23
    USER_INTEREST = 24
    WEBPAGE = 25
    OPERATING_SYSTEM_VERSION = 26
    APP_PAYMENT_MODEL = 27
    MOBILE_DEVICE = 28
    CUSTOM_AFFINITY = 29
    CUSTOM_INTENT = 30
    LOCATION_GROUP = 31
'''
CriterionTypeEnum = CriterionTypeEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CurrencyCodeErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CurrencyCodeError enum. The base class presumably
    # execs this string to build the real enum.IntEnum on first attribute
    # access (lazy creation).
    CurrencyCodeErrorEnum = '''\
class CurrencyCodeError(enum.IntEnum):
    """
    Enum describing possible currency code errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      UNSUPPORTED (int): The currency code is not supported.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    UNSUPPORTED = 2
'''
CurrencyCodeErrorEnum = CurrencyCodeErrorEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomInterestErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomInterestError enum. The base class presumably
    # execs this string to build the real enum.IntEnum on first attribute
    # access (lazy creation).
    CustomInterestErrorEnum = '''\
class CustomInterestError(enum.IntEnum):
    """
    Enum describing possible custom interest errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      NAME_ALREADY_USED (int): Duplicate custom interest name ignoring case.
      CUSTOM_INTEREST_MEMBER_ID_AND_TYPE_PARAMETER_NOT_PRESENT_IN_REMOVE (int): In the remove custom interest member operation, both member ID and pair
      [type, parameter] are not present.
      TYPE_AND_PARAMETER_NOT_FOUND (int): The pair of [type, parameter] does not exist.
      TYPE_AND_PARAMETER_ALREADY_EXISTED (int): The pair of [type, parameter] already exists.
      INVALID_CUSTOM_INTEREST_MEMBER_TYPE (int): Unsupported custom interest member type.
      CANNOT_REMOVE_WHILE_IN_USE (int): Cannot remove a custom interest while it's still being targeted.
      CANNOT_CHANGE_TYPE (int): Cannot mutate custom interest type.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NAME_ALREADY_USED = 2
    CUSTOM_INTEREST_MEMBER_ID_AND_TYPE_PARAMETER_NOT_PRESENT_IN_REMOVE = 3
    TYPE_AND_PARAMETER_NOT_FOUND = 4
    TYPE_AND_PARAMETER_ALREADY_EXISTED = 5
    INVALID_CUSTOM_INTEREST_MEMBER_TYPE = 6
    CANNOT_REMOVE_WHILE_IN_USE = 7
    CANNOT_CHANGE_TYPE = 8
'''
CustomInterestErrorEnum = CustomInterestErrorEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomInterestMemberTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomInterestMemberType enum. The base class
    # presumably execs this string to build the real enum.IntEnum on first
    # attribute access (lazy creation).
    CustomInterestMemberTypeEnum = '''\
class CustomInterestMemberType(enum.IntEnum):
    """
    Enum containing possible custom interest member types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      KEYWORD (int): Custom interest member type KEYWORD.
      URL (int): Custom interest member type URL.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    KEYWORD = 2
    URL = 3
'''
CustomInterestMemberTypeEnum = CustomInterestMemberTypeEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomInterestStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomInterestStatus enum. The base class presumably
    # execs this string to build the real enum.IntEnum on first attribute
    # access (lazy creation).
    CustomInterestStatusEnum = '''\
class CustomInterestStatus(enum.IntEnum):
    """
    Enum containing possible custom interest types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ENABLED (int): Enabled status - custom interest is enabled and can be targeted to.
      REMOVED (int): Removed status - custom interest is removed and cannot be used for
      targeting.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
CustomInterestStatusEnum = CustomInterestStatusEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomInterestTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomInterestType enum. The base class presumably
    # execs this string to build the real enum.IntEnum on first attribute
    # access (lazy creation).
    CustomInterestTypeEnum = '''\
class CustomInterestType(enum.IntEnum):
    """
    Enum containing possible custom interest types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      CUSTOM_AFFINITY (int): Allows brand advertisers to define custom affinity audience lists.
      CUSTOM_INTENT (int): Allows advertisers to define custom intent audience lists.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CUSTOM_AFFINITY = 2
    CUSTOM_INTENT = 3
'''
CustomInterestTypeEnum = CustomInterestTypeEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomPlaceholderField enum. The base class
    # presumably execs this string to build the real enum.IntEnum on first
    # attribute access (lazy creation). The escaped underscores (e.g.
    # URL\_LIST) come from the upstream generator and must stay as-is.
    CustomPlaceholderFieldEnum = '''\
class CustomPlaceholderField(enum.IntEnum):
    """
    Possible values for Custom placeholder fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ID (int): Data Type: STRING. Required. Combination ID and ID2 must be unique per
      offer.
      ID2 (int): Data Type: STRING. Combination ID and ID2 must be unique per offer.
      ITEM_TITLE (int): Data Type: STRING. Required. Main headline with product name to be shown
      in dynamic ad.
      ITEM_SUBTITLE (int): Data Type: STRING. Optional text to be shown in the image ad.
      ITEM_DESCRIPTION (int): Data Type: STRING. Optional description of the product to be shown in the
      ad.
      ITEM_ADDRESS (int): Data Type: STRING. Full address of your offer or service, including
      postal code. This will be used to identify the closest product to the
      user when there are multiple offers in the feed that are relevant to the
      user.
      PRICE (int): Data Type: STRING. Price to be shown in the ad.
      Example: "100.00 USD"
      FORMATTED_PRICE (int): Data Type: STRING. Formatted price to be shown in the ad.
      Example: "Starting at $100.00 USD", "$80 - $100"
      SALE_PRICE (int): Data Type: STRING. Sale price to be shown in the ad.
      Example: "80.00 USD"
      FORMATTED_SALE_PRICE (int): Data Type: STRING. Formatted sale price to be shown in the ad.
      Example: "On sale for $80.00", "$60 - $80"
      IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad. Highly recommended for
      image ads.
      ITEM_CATEGORY (int): Data Type: STRING. Used as a recommendation engine signal to serve items
      in the same category.
      FINAL_URLS (int): Data Type: URL\_LIST. Final URLs for the ad when using Upgraded URLs.
      User will be redirected to these URLs when they click on an ad, or when
      they click on a specific product for ads that have multiple products.
      FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
      URLs.
      TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
      CONTEXTUAL_KEYWORDS (int): Data Type: STRING\_LIST. Keywords used for product retrieval.
      ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
      android-app://{package\_id}/{scheme}/{host\_path}. The components are
      defined as follows: package\_id: app ID as specified in Google Play.
      scheme: the scheme to pass to the application. Can be HTTP, or a custom
      scheme. host\_path: identifies the specific content within your
      application.
      SIMILAR_IDS (int): Data Type: STRING\_LIST. List of recommended IDs to show together with
      this item.
      IOS_APP_LINK (int): Data Type: STRING. iOS app link.
      IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ID = 2
    ID2 = 3
    ITEM_TITLE = 4
    ITEM_SUBTITLE = 5
    ITEM_DESCRIPTION = 6
    ITEM_ADDRESS = 7
    PRICE = 8
    FORMATTED_PRICE = 9
    SALE_PRICE = 10
    FORMATTED_SALE_PRICE = 11
    IMAGE_URL = 12
    ITEM_CATEGORY = 13
    FINAL_URLS = 14
    FINAL_MOBILE_URLS = 15
    TRACKING_URL = 16
    CONTEXTUAL_KEYWORDS = 17
    ANDROID_APP_LINK = 18
    SIMILAR_IDS = 19
    IOS_APP_LINK = 20
    IOS_APP_STORE_ID = 21
'''
CustomPlaceholderFieldEnum = CustomPlaceholderFieldEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomerClientLinkErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomerClientLinkError enum. The base class
    # presumably execs this string to build the real enum.IntEnum on first
    # attribute access (lazy creation).
    CustomerClientLinkErrorEnum = '''\
class CustomerClientLinkError(enum.IntEnum):
    """
    Enum describing possible CustomerClientLink errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      CLIENT_ALREADY_INVITED_BY_THIS_MANAGER (int): Trying to manage a client that already in being managed by customer.
      CLIENT_ALREADY_MANAGED_IN_HIERARCHY (int): Already managed by some other manager in the hierarchy.
      CYCLIC_LINK_NOT_ALLOWED (int): Attempt to create a cycle in the hierarchy.
      CUSTOMER_HAS_TOO_MANY_ACCOUNTS (int): Managed accounts has the maximum number of linked accounts.
      CLIENT_HAS_TOO_MANY_INVITATIONS (int): Invitor has the maximum pending invitations.
      CANNOT_HIDE_OR_UNHIDE_MANAGER_ACCOUNTS (int): Attempt to change hidden status of a link that is not active.
      CUSTOMER_HAS_TOO_MANY_ACCOUNTS_AT_MANAGER (int): Parent manager account has the maximum number of linked accounts.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CLIENT_ALREADY_INVITED_BY_THIS_MANAGER = 2
    CLIENT_ALREADY_MANAGED_IN_HIERARCHY = 3
    CYCLIC_LINK_NOT_ALLOWED = 4
    CUSTOMER_HAS_TOO_MANY_ACCOUNTS = 5
    CLIENT_HAS_TOO_MANY_INVITATIONS = 6
    CANNOT_HIDE_OR_UNHIDE_MANAGER_ACCOUNTS = 7
    CUSTOMER_HAS_TOO_MANY_ACCOUNTS_AT_MANAGER = 8
'''
CustomerClientLinkErrorEnum = CustomerClientLinkErrorEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomerErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomerError enum. The base class presumably execs
    # this string to build the real enum.IntEnum on first attribute access
    # (lazy creation).
    CustomerErrorEnum = '''\
class CustomerError(enum.IntEnum):
    """
    Set of errors that are related to requests dealing with Customer.
    Next id: 26
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      STATUS_CHANGE_DISALLOWED (int): Customer status is not allowed to be changed from DRAFT and CLOSED.
      Currency code and at least one of country code and time zone needs to be
      set when status is changed to ENABLED.
      ACCOUNT_NOT_SET_UP (int): CustomerService cannot get a customer that has not been fully set up.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    STATUS_CHANGE_DISALLOWED = 2
    ACCOUNT_NOT_SET_UP = 3
'''
CustomerErrorEnum = CustomerErrorEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomerFeedErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomerFeedError enum. The base class presumably
    # execs this string to build the real enum.IntEnum on first attribute
    # access (lazy creation).
    CustomerFeedErrorEnum = '''\
class CustomerFeedError(enum.IntEnum):
    """
    Enum describing possible customer feed errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE (int): An active feed already exists for this customer and place holder type.
      CANNOT_CREATE_FOR_REMOVED_FEED (int): The specified feed is removed.
      CANNOT_CREATE_ALREADY_EXISTING_CUSTOMER_FEED (int): The CustomerFeed already exists. Update should be used to modify the
      existing CustomerFeed.
      CANNOT_MODIFY_REMOVED_CUSTOMER_FEED (int): Cannot update removed customer feed.
      INVALID_PLACEHOLDER_TYPE (int): Invalid placeholder type.
      MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE (int): Feed mapping for this placeholder type does not exist.
      PLACEHOLDER_TYPE_NOT_ALLOWED_ON_CUSTOMER_FEED (int): Placeholder not allowed at the account level.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 2
    CANNOT_CREATE_FOR_REMOVED_FEED = 3
    CANNOT_CREATE_ALREADY_EXISTING_CUSTOMER_FEED = 4
    CANNOT_MODIFY_REMOVED_CUSTOMER_FEED = 5
    INVALID_PLACEHOLDER_TYPE = 6
    MISSING_FEEDMAPPING_FOR_PLACEHOLDER_TYPE = 7
    PLACEHOLDER_TYPE_NOT_ALLOWED_ON_CUSTOMER_FEED = 8
'''
CustomerFeedErrorEnum = CustomerFeedErrorEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
class CustomerManagerLinkErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Source code for the CustomerManagerLinkError enum. The base class
    # presumably execs this string to build the real enum.IntEnum on first
    # attribute access (lazy creation).
    CustomerManagerLinkErrorEnum = '''\
class CustomerManagerLinkError(enum.IntEnum):
    """
    Enum describing possible CustomerManagerLink errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      NO_PENDING_INVITE (int): No pending invitation.
      SAME_CLIENT_MORE_THAN_ONCE_PER_CALL (int): Attempt to operate on the same client more than once in the same call.
      MANAGER_HAS_MAX_NUMBER_OF_LINKED_ACCOUNTS (int): Manager account has the maximum number of linked accounts.
      CANNOT_UNLINK_ACCOUNT_WITHOUT_ACTIVE_USER (int): If no active user on account it cannot be unlinked from its manager.
      CANNOT_REMOVE_LAST_CLIENT_ACCOUNT_OWNER (int): Account should have at least one active owner on it before being
      unlinked.
      CANNOT_CHANGE_ROLE_BY_NON_ACCOUNT_OWNER (int): Only account owners may change their permission role.
      CANNOT_CHANGE_ROLE_FOR_NON_ACTIVE_LINK_ACCOUNT (int): When a client's link to its manager is not active, the link role cannot
      be changed.
      DUPLICATE_CHILD_FOUND (int): Attempt to link a child to a parent that contains or will contain
      duplicate children.
      TEST_ACCOUNT_LINKS_TOO_MANY_CHILD_ACCOUNTS (int): The authorized customer is a test account. It can add no more than the
      allowed number of accounts
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NO_PENDING_INVITE = 2
    SAME_CLIENT_MORE_THAN_ONCE_PER_CALL = 3
    MANAGER_HAS_MAX_NUMBER_OF_LINKED_ACCOUNTS = 4
    CANNOT_UNLINK_ACCOUNT_WITHOUT_ACTIVE_USER = 5
    CANNOT_REMOVE_LAST_CLIENT_ACCOUNT_OWNER = 6
    CANNOT_CHANGE_ROLE_BY_NON_ACCOUNT_OWNER = 7
    CANNOT_CHANGE_ROLE_FOR_NON_ACTIVE_LINK_ACCOUNT = 8
    DUPLICATE_CHILD_FOUND = 9
    TEST_ACCOUNT_LINKS_TOO_MANY_CHILD_ACCOUNTS = 10
'''
CustomerManagerLinkErrorEnum = CustomerManagerLinkErrorEnum()  # For __getattribute__ (rebind to an instance so lookups go through the lazy-creation hook)
# Lazy holder for the CustomerMatchUploadKeyType IntEnum; the base class
# presumably builds the real enum type from this source string on first
# attribute access -- confirm in _CreateEnumTypeUponFirstAccess.
class CustomerMatchUploadKeyTypeEnum(_CreateEnumTypeUponFirstAccess):
CustomerMatchUploadKeyTypeEnum = '''\
class CustomerMatchUploadKeyType(enum.IntEnum):
    """
    Enum describing possible customer match upload key types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      CONTACT_INFO (int): Members are matched from customer info such as email address, phone
      number or physical address.
      CRM_ID (int): Members are matched from a user id generated and assigned by the
      advertiser.
      MOBILE_ADVERTISING_ID (int): Members are matched from mobile advertising ids.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CONTACT_INFO = 2
    CRM_ID = 3
    MOBILE_ADVERTISING_ID = 4
'''
# Swap class for instance so lookups go through __getattribute__.
CustomerMatchUploadKeyTypeEnum = CustomerMatchUploadKeyTypeEnum() # For __getattribute__
# Lazy holder for the CustomerPayPerConversionEligibilityFailureReason IntEnum;
# the base class presumably materializes it from this source string on first
# attribute access.
class CustomerPayPerConversionEligibilityFailureReasonEnum(_CreateEnumTypeUponFirstAccess):
CustomerPayPerConversionEligibilityFailureReasonEnum = '''\
class CustomerPayPerConversionEligibilityFailureReason(enum.IntEnum):
    """
    Enum describing possible reasons a customer is not eligible to use
    PaymentMode.CONVERSIONS.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      NOT_ENOUGH_CONVERSIONS (int): Customer does not have enough conversions.
      CONVERSION_LAG_TOO_HIGH (int): Customer's conversion lag is too high.
      HAS_CAMPAIGN_WITH_SHARED_BUDGET (int): Customer uses shared budgets.
      HAS_UPLOAD_CLICKS_CONVERSION (int): Customer has conversions with ConversionActionType.UPLOAD\_CLICKS.
      AVERAGE_DAILY_SPEND_TOO_HIGH (int): Customer's average daily spend is too high.
      ANALYSIS_NOT_COMPLETE (int): Customer's eligibility has not yet been calculated by the Google Ads
      backend. Check back soon.
      OTHER (int): Customer is not eligible due to other reasons.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NOT_ENOUGH_CONVERSIONS = 2
    CONVERSION_LAG_TOO_HIGH = 3
    HAS_CAMPAIGN_WITH_SHARED_BUDGET = 4
    HAS_UPLOAD_CLICKS_CONVERSION = 5
    AVERAGE_DAILY_SPEND_TOO_HIGH = 6
    ANALYSIS_NOT_COMPLETE = 7
    OTHER = 8
'''
# Swap class for instance so lookups go through __getattribute__.
CustomerPayPerConversionEligibilityFailureReasonEnum = CustomerPayPerConversionEligibilityFailureReasonEnum() # For __getattribute__
# Lazy holder for the DataDrivenModelStatus IntEnum; the base class presumably
# builds the real enum type from this source string on first attribute access.
class DataDrivenModelStatusEnum(_CreateEnumTypeUponFirstAccess):
DataDrivenModelStatusEnum = '''\
class DataDrivenModelStatus(enum.IntEnum):
    """
    Enumerates data driven model statuses.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      AVAILABLE (int): The data driven model is available.
      STALE (int): The data driven model is stale. It hasn't been updated for at least 7
      days. It is still being used, but will become expired if it does not get
      updated for 30 days.
      EXPIRED (int): The data driven model expired. It hasn't been updated for at least 30
      days and cannot be used. Most commonly this is because there hasn't been
      the required number of events in a recent 30-day period.
      NEVER_GENERATED (int): The data driven model has never been generated. Most commonly this is
      because there has never been the required number of events in any 30-day
      period.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    AVAILABLE = 2
    STALE = 3
    EXPIRED = 4
    NEVER_GENERATED = 5
'''
# Swap class for instance so lookups go through __getattribute__.
DataDrivenModelStatusEnum = DataDrivenModelStatusEnum() # For __getattribute__
# Lazy holder for the DatabaseError IntEnum; the base class presumably builds
# the real enum type from this source string on first attribute access.
class DatabaseErrorEnum(_CreateEnumTypeUponFirstAccess):
DatabaseErrorEnum = '''\
class DatabaseError(enum.IntEnum):
    """
    Enum describing possible database errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      CONCURRENT_MODIFICATION (int): Multiple requests were attempting to modify the same resource at once.
      Please retry the request.
      DATA_CONSTRAINT_VIOLATION (int): The request conflicted with existing data. This error will usually be
      replaced with a more specific error if the request is retried.
      REQUEST_TOO_LARGE (int): The data written is too large. Please split the request into smaller
      requests.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CONCURRENT_MODIFICATION = 2
    DATA_CONSTRAINT_VIOLATION = 3
    REQUEST_TOO_LARGE = 4
'''
# Swap class for instance so lookups go through __getattribute__.
DatabaseErrorEnum = DatabaseErrorEnum() # For __getattribute__
# Lazy holder for the DateError IntEnum; the base class presumably builds the
# real enum type from this source string on first attribute access. NOTE:
# member values are intentionally non-sequential (5 is absent, 11/12 follow 6)
# to match the upstream proto numbering.
class DateErrorEnum(_CreateEnumTypeUponFirstAccess):
DateErrorEnum = '''\
class DateError(enum.IntEnum):
    """
    Enum describing possible date errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      INVALID_FIELD_VALUES_IN_DATE (int): Given field values do not correspond to a valid date.
      INVALID_FIELD_VALUES_IN_DATE_TIME (int): Given field values do not correspond to a valid date time.
      INVALID_STRING_DATE (int): The string date's format should be yyyy-mm-dd.
      INVALID_STRING_DATE_TIME_MICROS (int): The string date time's format should be yyyy-mm-dd hh:mm:ss.ssssss.
      INVALID_STRING_DATE_TIME_SECONDS (int): The string date time's format should be yyyy-mm-dd hh:mm:ss.
      INVALID_STRING_DATE_TIME_SECONDS_WITH_OFFSET (int): The string date time's format should be yyyy-mm-dd hh:mm:ss+|-hh:mm.
      EARLIER_THAN_MINIMUM_DATE (int): Date is before allowed minimum.
      LATER_THAN_MAXIMUM_DATE (int): Date is after allowed maximum.
      DATE_RANGE_MINIMUM_DATE_LATER_THAN_MAXIMUM_DATE (int): Date range bounds are not in order.
      DATE_RANGE_MINIMUM_AND_MAXIMUM_DATES_BOTH_NULL (int): Both dates in range are null.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_FIELD_VALUES_IN_DATE = 2
    INVALID_FIELD_VALUES_IN_DATE_TIME = 3
    INVALID_STRING_DATE = 4
    INVALID_STRING_DATE_TIME_MICROS = 6
    INVALID_STRING_DATE_TIME_SECONDS = 11
    INVALID_STRING_DATE_TIME_SECONDS_WITH_OFFSET = 12
    EARLIER_THAN_MINIMUM_DATE = 7
    LATER_THAN_MAXIMUM_DATE = 8
    DATE_RANGE_MINIMUM_DATE_LATER_THAN_MAXIMUM_DATE = 9
    DATE_RANGE_MINIMUM_AND_MAXIMUM_DATES_BOTH_NULL = 10
'''
# Swap class for instance so lookups go through __getattribute__.
DateErrorEnum = DateErrorEnum() # For __getattribute__
# Lazy holder for the DateRangeError IntEnum; the base class presumably builds
# the real enum type from this source string on first attribute access.
class DateRangeErrorEnum(_CreateEnumTypeUponFirstAccess):
DateRangeErrorEnum = '''\
class DateRangeError(enum.IntEnum):
    """
    Enum describing possible date range errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      INVALID_DATE (int): Invalid date.
      START_DATE_AFTER_END_DATE (int): The start date was after the end date.
      CANNOT_SET_DATE_TO_PAST (int): Cannot set date to past time
      AFTER_MAXIMUM_ALLOWABLE_DATE (int): A date was used that is past the system "last" date.
      CANNOT_MODIFY_START_DATE_IF_ALREADY_STARTED (int): Trying to change start date on a resource that has started.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_DATE = 2
    START_DATE_AFTER_END_DATE = 3
    CANNOT_SET_DATE_TO_PAST = 4
    AFTER_MAXIMUM_ALLOWABLE_DATE = 5
    CANNOT_MODIFY_START_DATE_IF_ALREADY_STARTED = 6
'''
# Swap class for instance so lookups go through __getattribute__.
DateRangeErrorEnum = DateRangeErrorEnum() # For __getattribute__
# Lazy holder for the DayOfWeek IntEnum; the base class presumably builds the
# real enum type from this source string on first attribute access.
class DayOfWeekEnum(_CreateEnumTypeUponFirstAccess):
DayOfWeekEnum = '''\
class DayOfWeek(enum.IntEnum):
    """
    Enumerates days of the week, e.g., "Monday".
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      MONDAY (int): Monday.
      TUESDAY (int): Tuesday.
      WEDNESDAY (int): Wednesday.
      THURSDAY (int): Thursday.
      FRIDAY (int): Friday.
      SATURDAY (int): Saturday.
      SUNDAY (int): Sunday.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    MONDAY = 2
    TUESDAY = 3
    WEDNESDAY = 4
    THURSDAY = 5
    FRIDAY = 6
    SATURDAY = 7
    SUNDAY = 8
'''
# Swap class for instance so lookups go through __getattribute__.
DayOfWeekEnum = DayOfWeekEnum() # For __getattribute__
# Lazy holder for the Device IntEnum; the base class presumably builds the
# real enum type from this source string on first attribute access. NOTE:
# CONNECTED_TV (6) was added after OTHER (5), so values are out of order by
# design (upstream proto numbering).
class DeviceEnum(_CreateEnumTypeUponFirstAccess):
DeviceEnum = '''\
class Device(enum.IntEnum):
    """
    Enumerates Google Ads devices available for targeting.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      MOBILE (int): Mobile devices with full browsers.
      TABLET (int): Tablets with full browsers.
      DESKTOP (int): Computers.
      CONNECTED_TV (int): Smart TVs and game consoles.
      OTHER (int): Other device types.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    MOBILE = 2
    TABLET = 3
    DESKTOP = 4
    CONNECTED_TV = 6
    OTHER = 5
'''
# Swap class for instance so lookups go through __getattribute__.
DeviceEnum = DeviceEnum() # For __getattribute__
# Lazy holder for the DisplayAdFormatSetting IntEnum; the base class presumably
# builds the real enum type from this source string on first attribute access.
class DisplayAdFormatSettingEnum(_CreateEnumTypeUponFirstAccess):
DisplayAdFormatSettingEnum = '''\
class DisplayAdFormatSetting(enum.IntEnum):
    """
    Enumerates display ad format settings.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      ALL_FORMATS (int): Text, image and native formats.
      NON_NATIVE (int): Text and image formats.
      NATIVE (int): Native format, i.e. the format rendering is controlled by the publisher
      and not by Google.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ALL_FORMATS = 2
    NON_NATIVE = 3
    NATIVE = 4
'''
# Swap class for instance so lookups go through __getattribute__.
DisplayAdFormatSettingEnum = DisplayAdFormatSettingEnum() # For __getattribute__
# Lazy holder for the DisplayUploadProductType IntEnum; the base class
# presumably builds the real enum type from this source string on first
# attribute access.
class DisplayUploadProductTypeEnum(_CreateEnumTypeUponFirstAccess):
DisplayUploadProductTypeEnum = '''\
class DisplayUploadProductType(enum.IntEnum):
    """
    Enumerates display upload product types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      HTML5_UPLOAD_AD (int): HTML5 upload ad. This product type requires the upload\_media\_bundle
      field in DisplayUploadAdInfo to be set.
      DYNAMIC_HTML5_EDUCATION_AD (int): Dynamic HTML5 education ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in an education campaign.
      DYNAMIC_HTML5_FLIGHT_AD (int): Dynamic HTML5 flight ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in a flight campaign.
      DYNAMIC_HTML5_HOTEL_RENTAL_AD (int): Dynamic HTML5 hotel and rental ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in a hotel campaign.
      DYNAMIC_HTML5_JOB_AD (int): Dynamic HTML5 job ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in a job campaign.
      DYNAMIC_HTML5_LOCAL_AD (int): Dynamic HTML5 local ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in a local campaign.
      DYNAMIC_HTML5_REAL_ESTATE_AD (int): Dynamic HTML5 real estate ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in a real estate campaign.
      DYNAMIC_HTML5_CUSTOM_AD (int): Dynamic HTML5 custom ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in a custom campaign.
      DYNAMIC_HTML5_TRAVEL_AD (int): Dynamic HTML5 travel ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in a travel campaign.
      DYNAMIC_HTML5_HOTEL_AD (int): Dynamic HTML5 hotel ad. This product type requires the
      upload\_media\_bundle field in DisplayUploadAdInfo to be set. Can only
      be used in a hotel campaign.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    HTML5_UPLOAD_AD = 2
    DYNAMIC_HTML5_EDUCATION_AD = 3
    DYNAMIC_HTML5_FLIGHT_AD = 4
    DYNAMIC_HTML5_HOTEL_RENTAL_AD = 5
    DYNAMIC_HTML5_JOB_AD = 6
    DYNAMIC_HTML5_LOCAL_AD = 7
    DYNAMIC_HTML5_REAL_ESTATE_AD = 8
    DYNAMIC_HTML5_CUSTOM_AD = 9
    DYNAMIC_HTML5_TRAVEL_AD = 10
    DYNAMIC_HTML5_HOTEL_AD = 11
'''
# Swap class for instance so lookups go through __getattribute__.
DisplayUploadProductTypeEnum = DisplayUploadProductTypeEnum() # For __getattribute__
# Lazy holder for the DistanceBucket IntEnum; the base class presumably builds
# the real enum type from this source string on first attribute access.
class DistanceBucketEnum(_CreateEnumTypeUponFirstAccess):
DistanceBucketEnum = '''\
class DistanceBucket(enum.IntEnum):
    """
    The distance bucket for a user’s distance from an advertiser’s location
    extension.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      WITHIN_700M (int): User was within 700m of the location.
      WITHIN_1KM (int): User was within 1KM of the location.
      WITHIN_5KM (int): User was within 5KM of the location.
      WITHIN_10KM (int): User was within 10KM of the location.
      WITHIN_15KM (int): User was within 15KM of the location.
      WITHIN_20KM (int): User was within 20KM of the location.
      WITHIN_25KM (int): User was within 25KM of the location.
      WITHIN_30KM (int): User was within 30KM of the location.
      WITHIN_35KM (int): User was within 35KM of the location.
      WITHIN_40KM (int): User was within 40KM of the location.
      WITHIN_45KM (int): User was within 45KM of the location.
      WITHIN_50KM (int): User was within 50KM of the location.
      WITHIN_55KM (int): User was within 55KM of the location.
      WITHIN_60KM (int): User was within 60KM of the location.
      WITHIN_65KM (int): User was within 65KM of the location.
      BEYOND_65KM (int): User was beyond 65KM of the location.
      WITHIN_0_7MILES (int): User was within 0.7 miles of the location.
      WITHIN_1MILE (int): User was within 1 mile of the location.
      WITHIN_5MILES (int): User was within 5 miles of the location.
      WITHIN_10MILES (int): User was within 10 miles of the location.
      WITHIN_15MILES (int): User was within 15 miles of the location.
      WITHIN_20MILES (int): User was within 20 miles of the location.
      WITHIN_25MILES (int): User was within 25 miles of the location.
      WITHIN_30MILES (int): User was within 30 miles of the location.
      WITHIN_35MILES (int): User was within 35 miles of the location.
      WITHIN_40MILES (int): User was within 40 miles of the location.
      BEYOND_40MILES (int): User was beyond 40 miles of the location.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    WITHIN_700M = 2
    WITHIN_1KM = 3
    WITHIN_5KM = 4
    WITHIN_10KM = 5
    WITHIN_15KM = 6
    WITHIN_20KM = 7
    WITHIN_25KM = 8
    WITHIN_30KM = 9
    WITHIN_35KM = 10
    WITHIN_40KM = 11
    WITHIN_45KM = 12
    WITHIN_50KM = 13
    WITHIN_55KM = 14
    WITHIN_60KM = 15
    WITHIN_65KM = 16
    BEYOND_65KM = 17
    WITHIN_0_7MILES = 18
    WITHIN_1MILE = 19
    WITHIN_5MILES = 20
    WITHIN_10MILES = 21
    WITHIN_15MILES = 22
    WITHIN_20MILES = 23
    WITHIN_25MILES = 24
    WITHIN_30MILES = 25
    WITHIN_35MILES = 26
    WITHIN_40MILES = 27
    BEYOND_40MILES = 28
'''
# Swap class for instance so lookups go through __getattribute__.
DistanceBucketEnum = DistanceBucketEnum() # For __getattribute__
# Lazy holder for the DistinctError IntEnum; the base class presumably builds
# the real enum type from this source string on first attribute access.
class DistinctErrorEnum(_CreateEnumTypeUponFirstAccess):
DistinctErrorEnum = '''\
class DistinctError(enum.IntEnum):
    """
    Enum describing possible distinct errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      DUPLICATE_ELEMENT (int): Duplicate element.
      DUPLICATE_TYPE (int): Duplicate type.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DUPLICATE_ELEMENT = 2
    DUPLICATE_TYPE = 3
'''
# Swap class for instance so lookups go through __getattribute__.
DistinctErrorEnum = DistinctErrorEnum() # For __getattribute__
# Lazy holder for the DsaPageFeedCriterionField IntEnum; the base class
# presumably builds the real enum type from this source string on first
# attribute access.
class DsaPageFeedCriterionFieldEnum(_CreateEnumTypeUponFirstAccess):
DsaPageFeedCriterionFieldEnum = '''\
class DsaPageFeedCriterionField(enum.IntEnum):
    """
    Possible values for Dynamic Search Ad Page Feed criterion fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PAGE_URL (int): Data Type: URL or URL\_LIST. URL of the web page you want to target.
      LABEL (int): Data Type: STRING\_LIST. The labels that will help you target ads within
      your page feed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PAGE_URL = 2
    LABEL = 3
'''
# Swap class for instance so lookups go through __getattribute__.
DsaPageFeedCriterionFieldEnum = DsaPageFeedCriterionFieldEnum() # For __getattribute__
# Lazy holder for the EducationPlaceholderField IntEnum; the base class
# presumably builds the real enum type from this source string on first
# attribute access.
class EducationPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
EducationPlaceholderFieldEnum = '''\
class EducationPlaceholderField(enum.IntEnum):
    """
    Possible values for Education placeholder fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PROGRAM_ID (int): Data Type: STRING. Required. Combination of PROGRAM ID and LOCATION ID
      must be unique per offer.
      LOCATION_ID (int): Data Type: STRING. Combination of PROGRAM ID and LOCATION ID must be
      unique per offer.
      PROGRAM_NAME (int): Data Type: STRING. Required. Main headline with program name to be shown
      in dynamic ad.
      AREA_OF_STUDY (int): Data Type: STRING. Area of study that can be shown in dynamic ad.
      PROGRAM_DESCRIPTION (int): Data Type: STRING. Description of program that can be shown in dynamic
      ad.
      SCHOOL_NAME (int): Data Type: STRING. Name of school that can be shown in dynamic ad.
      ADDRESS (int): Data Type: STRING. Complete school address, including postal code.
      THUMBNAIL_IMAGE_URL (int): Data Type: URL. Image to be displayed in ads.
      ALTERNATIVE_THUMBNAIL_IMAGE_URL (int): Data Type: URL. Alternative hosted file of image to be used in the ad.
      FINAL_URLS (int): Data Type: URL\_LIST. Required. Final URLs to be used in ad when using
      Upgraded URLs; the more specific the better (e.g. the individual URL of
      a specific program and its location).
      FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
      URLs.
      TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
      CONTEXTUAL_KEYWORDS (int): Data Type: STRING\_LIST. Keywords used for product retrieval.
      ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
      android-app://{package\_id}/{scheme}/{host\_path}. The components are
      defined as follows: package\_id: app ID as specified in Google Play.
      scheme: the scheme to pass to the application. Can be HTTP, or a custom
      scheme. host\_path: identifies the specific content within your
      application.
      SIMILAR_PROGRAM_IDS (int): Data Type: STRING\_LIST. List of recommended program IDs to show
      together with this item.
      IOS_APP_LINK (int): Data Type: STRING. iOS app link.
      IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PROGRAM_ID = 2
    LOCATION_ID = 3
    PROGRAM_NAME = 4
    AREA_OF_STUDY = 5
    PROGRAM_DESCRIPTION = 6
    SCHOOL_NAME = 7
    ADDRESS = 8
    THUMBNAIL_IMAGE_URL = 9
    ALTERNATIVE_THUMBNAIL_IMAGE_URL = 10
    FINAL_URLS = 11
    FINAL_MOBILE_URLS = 12
    TRACKING_URL = 13
    CONTEXTUAL_KEYWORDS = 14
    ANDROID_APP_LINK = 15
    SIMILAR_PROGRAM_IDS = 16
    IOS_APP_LINK = 17
    IOS_APP_STORE_ID = 18
'''
# Swap class for instance so lookups go through __getattribute__.
EducationPlaceholderFieldEnum = EducationPlaceholderFieldEnum() # For __getattribute__
# Lazy holder for the EnumError IntEnum; the base class presumably builds the
# real enum type from this source string on first attribute access. NOTE:
# value 2 is intentionally skipped (upstream proto numbering).
class EnumErrorEnum(_CreateEnumTypeUponFirstAccess):
EnumErrorEnum = '''\
class EnumError(enum.IntEnum):
    """
    Enum describing possible enum errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      ENUM_VALUE_NOT_PERMITTED (int): The enum value is not permitted.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENUM_VALUE_NOT_PERMITTED = 3
'''
# Swap class for instance so lookups go through __getattribute__.
EnumErrorEnum = EnumErrorEnum() # For __getattribute__
# Lazy holder for the ExtensionFeedItemError IntEnum; the base class presumably
# builds the real enum type from this source string on first attribute access.
class ExtensionFeedItemErrorEnum(_CreateEnumTypeUponFirstAccess):
ExtensionFeedItemErrorEnum = '''\
class ExtensionFeedItemError(enum.IntEnum):
    """
    Enum describing possible extension feed item errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      VALUE_OUT_OF_RANGE (int): Value is not within the accepted range.
      URL_LIST_TOO_LONG (int): Url list is too long.
      CANNOT_HAVE_RESTRICTION_ON_EMPTY_GEO_TARGETING (int): Cannot have a geo targeting restriction without having geo targeting.
      CANNOT_SET_WITH_FINAL_URLS (int): Cannot simultaneously set sitelink field with final urls.
      CANNOT_SET_WITHOUT_FINAL_URLS (int): Must set field with final urls.
      INVALID_PHONE_NUMBER (int): Phone number for a call extension is invalid.
      PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY (int): Phone number for a call extension is not supported for the given country
      code.
      CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED (int): A carrier specific number in short format is not allowed for call
      extensions.
      PREMIUM_RATE_NUMBER_NOT_ALLOWED (int): Premium rate numbers are not allowed for call extensions.
      DISALLOWED_NUMBER_TYPE (int): Phone number type for a call extension is not allowed.
      For example, personal number is not allowed for a call extension in
      most regions.
      INVALID_DOMESTIC_PHONE_NUMBER_FORMAT (int): Phone number for a call extension does not meet domestic format
      requirements.
      VANITY_PHONE_NUMBER_NOT_ALLOWED (int): Vanity phone numbers (i.e. those including letters) are not allowed for
      call extensions.
      INVALID_CALL_CONVERSION_ACTION (int): Call conversion action provided for a call extension is invalid.
      CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING (int): For a call extension, the customer is not whitelisted for call tracking.
      CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY (int): Call tracking is not supported for the given country for a call
      extension.
      CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED (int): Customer hasn't consented for call recording, which is required for
      creating/updating call feed items. Please see
      https://support.google.com/google-ads/answer/7412639.
      INVALID_APP_ID (int): App id provided for an app extension is invalid.
      QUOTES_IN_REVIEW_EXTENSION_SNIPPET (int): Quotation marks present in the review text for a review extension.
      HYPHENS_IN_REVIEW_EXTENSION_SNIPPET (int): Hyphen character present in the review text for a review extension.
      REVIEW_EXTENSION_SOURCE_INELIGIBLE (int): A blacklisted review source name or url was provided for a review
      extension.
      SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT (int): Review source name should not be found in the review text.
      INCONSISTENT_CURRENCY_CODES (int): Inconsistent currency codes.
      PRICE_EXTENSION_HAS_DUPLICATED_HEADERS (int): Price extension cannot have duplicated headers.
      PRICE_ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION (int): Price item cannot have duplicated header and description.
      PRICE_EXTENSION_HAS_TOO_FEW_ITEMS (int): Price extension has too few items.
      PRICE_EXTENSION_HAS_TOO_MANY_ITEMS (int): Price extension has too many items.
      UNSUPPORTED_VALUE (int): The input value is not currently supported.
      UNSUPPORTED_VALUE_IN_SELECTED_LANGUAGE (int): The input value is not currently supported in the selected language of an
      extension.
      INVALID_DEVICE_PREFERENCE (int): Unknown or unsupported device preference.
      INVALID_SCHEDULE_END (int): Invalid feed item schedule end time (i.e., endHour = 24 and endMinute !=
      0).
      DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE (int): Date time zone does not match the account's time zone.
      INVALID_SNIPPETS_HEADER (int): Invalid structured snippet header.
      CANNOT_OPERATE_ON_REMOVED_FEED_ITEM (int): Cannot operate on removed feed item.
      PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY (int): Phone number not supported when call tracking enabled for country.
      CONFLICTING_CALL_CONVERSION_SETTINGS (int): Cannot set call\_conversion\_action while
      call\_conversion\_tracking\_enabled is set to true.
      EXTENSION_TYPE_MISMATCH (int): The type of the input extension feed item doesn't match the existing
      extension feed item.
      EXTENSION_SUBTYPE_REQUIRED (int): The oneof field extension i.e. subtype of extension feed item is
      required.
      EXTENSION_TYPE_UNSUPPORTED (int): The referenced feed item is not mapped to a supported extension type.
      CANNOT_OPERATE_ON_FEED_WITH_MULTIPLE_MAPPINGS (int): Cannot operate on a Feed with more than one active FeedMapping.
      CANNOT_OPERATE_ON_FEED_WITH_KEY_ATTRIBUTES (int): Cannot operate on a Feed that has key attributes.
      INVALID_PRICE_FORMAT (int): Input price is not in a valid format.
      PROMOTION_INVALID_TIME (int): The promotion time is invalid.
      TOO_MANY_DECIMAL_PLACES_SPECIFIED (int): This field has too many decimal places specified.
      CONCRETE_EXTENSION_TYPE_REQUIRED (int): Concrete sub type of ExtensionFeedItem is required for this operation.
      SCHEDULE_END_NOT_AFTER_START (int): Feed item schedule end time must be after start time.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    VALUE_OUT_OF_RANGE = 2
    URL_LIST_TOO_LONG = 3
    CANNOT_HAVE_RESTRICTION_ON_EMPTY_GEO_TARGETING = 4
    CANNOT_SET_WITH_FINAL_URLS = 5
    CANNOT_SET_WITHOUT_FINAL_URLS = 6
    INVALID_PHONE_NUMBER = 7
    PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY = 8
    CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED = 9
    PREMIUM_RATE_NUMBER_NOT_ALLOWED = 10
    DISALLOWED_NUMBER_TYPE = 11
    INVALID_DOMESTIC_PHONE_NUMBER_FORMAT = 12
    VANITY_PHONE_NUMBER_NOT_ALLOWED = 13
    INVALID_CALL_CONVERSION_ACTION = 14
    CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING = 15
    CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 16
    CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED = 17
    INVALID_APP_ID = 18
    QUOTES_IN_REVIEW_EXTENSION_SNIPPET = 19
    HYPHENS_IN_REVIEW_EXTENSION_SNIPPET = 20
    REVIEW_EXTENSION_SOURCE_INELIGIBLE = 21
    SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT = 22
    INCONSISTENT_CURRENCY_CODES = 23
    PRICE_EXTENSION_HAS_DUPLICATED_HEADERS = 24
    PRICE_ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION = 25
    PRICE_EXTENSION_HAS_TOO_FEW_ITEMS = 26
    PRICE_EXTENSION_HAS_TOO_MANY_ITEMS = 27
    UNSUPPORTED_VALUE = 28
    UNSUPPORTED_VALUE_IN_SELECTED_LANGUAGE = 29
    INVALID_DEVICE_PREFERENCE = 30
    INVALID_SCHEDULE_END = 31
    DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE = 32
    INVALID_SNIPPETS_HEADER = 33
    CANNOT_OPERATE_ON_REMOVED_FEED_ITEM = 34
    PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY = 35
    CONFLICTING_CALL_CONVERSION_SETTINGS = 36
    EXTENSION_TYPE_MISMATCH = 37
    EXTENSION_SUBTYPE_REQUIRED = 38
    EXTENSION_TYPE_UNSUPPORTED = 39
    CANNOT_OPERATE_ON_FEED_WITH_MULTIPLE_MAPPINGS = 40
    CANNOT_OPERATE_ON_FEED_WITH_KEY_ATTRIBUTES = 41
    INVALID_PRICE_FORMAT = 42
    PROMOTION_INVALID_TIME = 43
    TOO_MANY_DECIMAL_PLACES_SPECIFIED = 44
    CONCRETE_EXTENSION_TYPE_REQUIRED = 45
    SCHEDULE_END_NOT_AFTER_START = 46
'''
# Swap class for instance so lookups go through __getattribute__.
ExtensionFeedItemErrorEnum = ExtensionFeedItemErrorEnum() # For __getattribute__
class ExtensionSettingDeviceEnum(_CreateEnumTypeUponFirstAccess):
ExtensionSettingDeviceEnum = '''\
class ExtensionSettingDevice(enum.IntEnum):
"""
Possbile device types for an extension setting.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): The value is unknown in this version.
MOBILE (int): Mobile. The extensions in the extension setting will only serve on
mobile devices.
DESKTOP (int): Desktop. The extensions in the extension setting will only serve on
desktop devices.
"""
UNSPECIFIED = 0
UNKNOWN = 1
MOBILE = 2
DESKTOP = 3
'''
ExtensionSettingDeviceEnum = ExtensionSettingDeviceEnum() # For __getattribute__
# Lazy holder for the ExtensionSettingError IntEnum; the base class presumably
# builds the real enum type from this source string on first attribute access.
# NOTE: values 44 and 46 are intentionally absent (upstream proto numbering).
class ExtensionSettingErrorEnum(_CreateEnumTypeUponFirstAccess):
ExtensionSettingErrorEnum = '''\
class ExtensionSettingError(enum.IntEnum):
    """
    Enum describing possible extension setting errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      EXTENSIONS_REQUIRED (int): A platform restriction was provided without input extensions or existing
      extensions.
      FEED_TYPE_EXTENSION_TYPE_MISMATCH (int): The provided feed type does not correspond to the provided extensions.
      INVALID_FEED_TYPE (int): The provided feed type cannot be used.
      INVALID_FEED_TYPE_FOR_CUSTOMER_EXTENSION_SETTING (int): The provided feed type cannot be used at the customer level.
      CANNOT_CHANGE_FEED_ITEM_ON_CREATE (int): Cannot change a feed item field on a CREATE operation.
      CANNOT_UPDATE_NEWLY_CREATED_EXTENSION (int): Cannot update an extension that is not already in this setting.
      NO_EXISTING_AD_GROUP_EXTENSION_SETTING_FOR_TYPE (int): There is no existing AdGroupExtensionSetting for this type.
      NO_EXISTING_CAMPAIGN_EXTENSION_SETTING_FOR_TYPE (int): There is no existing CampaignExtensionSetting for this type.
      NO_EXISTING_CUSTOMER_EXTENSION_SETTING_FOR_TYPE (int): There is no existing CustomerExtensionSetting for this type.
      AD_GROUP_EXTENSION_SETTING_ALREADY_EXISTS (int): The AdGroupExtensionSetting already exists. UPDATE should be used to
      modify the existing AdGroupExtensionSetting.
      CAMPAIGN_EXTENSION_SETTING_ALREADY_EXISTS (int): The CampaignExtensionSetting already exists. UPDATE should be used to
      modify the existing CampaignExtensionSetting.
      CUSTOMER_EXTENSION_SETTING_ALREADY_EXISTS (int): The CustomerExtensionSetting already exists. UPDATE should be used to
      modify the existing CustomerExtensionSetting.
      AD_GROUP_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE (int): An active ad group feed already exists for this place holder type.
      CAMPAIGN_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE (int): An active campaign feed already exists for this place holder type.
      CUSTOMER_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE (int): An active customer feed already exists for this place holder type.
      VALUE_OUT_OF_RANGE (int): Value is not within the accepted range.
      CANNOT_SET_FIELD_WITH_FINAL_URLS (int): Cannot simultaneously set specified field with final urls.
      FINAL_URLS_NOT_SET (int): Must set field with final urls.
      INVALID_PHONE_NUMBER (int): Phone number for a call extension is invalid.
      PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY (int): Phone number for a call extension is not supported for the given country
      code.
      CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED (int): A carrier specific number in short format is not allowed for call
      extensions.
      PREMIUM_RATE_NUMBER_NOT_ALLOWED (int): Premium rate numbers are not allowed for call extensions.
      DISALLOWED_NUMBER_TYPE (int): Phone number type for a call extension is not allowed.
      INVALID_DOMESTIC_PHONE_NUMBER_FORMAT (int): Phone number for a call extension does not meet domestic format
      requirements.
      VANITY_PHONE_NUMBER_NOT_ALLOWED (int): Vanity phone numbers (i.e. those including letters) are not allowed for
      call extensions.
      INVALID_COUNTRY_CODE (int): Country code provided for a call extension is invalid.
      INVALID_CALL_CONVERSION_TYPE_ID (int): Call conversion type id provided for a call extension is invalid.
      CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING (int): For a call extension, the customer is not whitelisted for call tracking.
      CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY (int): Call tracking is not supported for the given country for a call
      extension.
      INVALID_APP_ID (int): App id provided for an app extension is invalid.
      QUOTES_IN_REVIEW_EXTENSION_SNIPPET (int): Quotation marks present in the review text for a review extension.
      HYPHENS_IN_REVIEW_EXTENSION_SNIPPET (int): Hyphen character present in the review text for a review extension.
      REVIEW_EXTENSION_SOURCE_NOT_ELIGIBLE (int): A blacklisted review source name or url was provided for a review
      extension.
      SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT (int): Review source name should not be found in the review text.
      MISSING_FIELD (int): Field must be set.
      INCONSISTENT_CURRENCY_CODES (int): Inconsistent currency codes.
      PRICE_EXTENSION_HAS_DUPLICATED_HEADERS (int): Price extension cannot have duplicated headers.
      PRICE_ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION (int): Price item cannot have duplicated header and description.
      PRICE_EXTENSION_HAS_TOO_FEW_ITEMS (int): Price extension has too few items
      PRICE_EXTENSION_HAS_TOO_MANY_ITEMS (int): Price extension has too many items
      UNSUPPORTED_VALUE (int): The input value is not currently supported.
      INVALID_DEVICE_PREFERENCE (int): Unknown or unsupported device preference.
      INVALID_SCHEDULE_END (int): Invalid feed item schedule end time (i.e., endHour = 24 and
      endMinute != 0).
      DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE (int): Date time zone does not match the account's time zone.
      OVERLAPPING_SCHEDULES_NOT_ALLOWED (int): Overlapping feed item schedule times (e.g., 7-10AM and 8-11AM) are not
      allowed.
      SCHEDULE_END_NOT_AFTER_START (int): Feed item schedule end time must be after start time.
      TOO_MANY_SCHEDULES_PER_DAY (int): There are too many feed item schedules per day.
      DUPLICATE_EXTENSION_FEED_ITEM_EDIT (int): Cannot edit the same extension feed item more than once in the same
      request.
      INVALID_SNIPPETS_HEADER (int): Invalid structured snippet header.
      PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY (int): Phone number with call tracking enabled is not supported for the
      specified country.
      CAMPAIGN_TARGETING_MISMATCH (int): The targeted adgroup must belong to the targeted campaign.
      CANNOT_OPERATE_ON_REMOVED_FEED (int): The feed used by the ExtensionSetting is removed and cannot be operated
      on. Remove the ExtensionSetting to allow a new one to be created using
      an active feed.
      EXTENSION_TYPE_REQUIRED (int): The ExtensionFeedItem type is required for this operation.
      INCOMPATIBLE_UNDERLYING_MATCHING_FUNCTION (int): The matching function that links the extension feed to the customer,
      campaign, or ad group is not compatible with the ExtensionSetting
      services.
      START_DATE_AFTER_END_DATE (int): Start date must be before end date.
      INVALID_PRICE_FORMAT (int): Input price is not in a valid format.
      PROMOTION_INVALID_TIME (int): The promotion time is invalid.
      PROMOTION_CANNOT_SET_PERCENT_DISCOUNT_AND_MONEY_DISCOUNT (int): Cannot set both percent discount and money discount fields.
      PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT (int): Cannot set both promotion code and orders over amount fields.
      TOO_MANY_DECIMAL_PLACES_SPECIFIED (int): This field has too many decimal places specified.
      INVALID_LANGUAGE_CODE (int): The language code is not valid.
      UNSUPPORTED_LANGUAGE (int): The language is not supported.
      CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED (int): Customer hasn't consented for call recording, which is required for
      adding/updating call extensions. Please see
      https://support.google.com/google-ads/answer/7412639.
      EXTENSION_SETTING_UPDATE_IS_A_NOOP (int): The UPDATE operation does not specify any fields other than the resource
      name in the update mask.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    EXTENSIONS_REQUIRED = 2
    FEED_TYPE_EXTENSION_TYPE_MISMATCH = 3
    INVALID_FEED_TYPE = 4
    INVALID_FEED_TYPE_FOR_CUSTOMER_EXTENSION_SETTING = 5
    CANNOT_CHANGE_FEED_ITEM_ON_CREATE = 6
    CANNOT_UPDATE_NEWLY_CREATED_EXTENSION = 7
    NO_EXISTING_AD_GROUP_EXTENSION_SETTING_FOR_TYPE = 8
    NO_EXISTING_CAMPAIGN_EXTENSION_SETTING_FOR_TYPE = 9
    NO_EXISTING_CUSTOMER_EXTENSION_SETTING_FOR_TYPE = 10
    AD_GROUP_EXTENSION_SETTING_ALREADY_EXISTS = 11
    CAMPAIGN_EXTENSION_SETTING_ALREADY_EXISTS = 12
    CUSTOMER_EXTENSION_SETTING_ALREADY_EXISTS = 13
    AD_GROUP_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 14
    CAMPAIGN_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 15
    CUSTOMER_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE = 16
    VALUE_OUT_OF_RANGE = 17
    CANNOT_SET_FIELD_WITH_FINAL_URLS = 18
    FINAL_URLS_NOT_SET = 19
    INVALID_PHONE_NUMBER = 20
    PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY = 21
    CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED = 22
    PREMIUM_RATE_NUMBER_NOT_ALLOWED = 23
    DISALLOWED_NUMBER_TYPE = 24
    INVALID_DOMESTIC_PHONE_NUMBER_FORMAT = 25
    VANITY_PHONE_NUMBER_NOT_ALLOWED = 26
    INVALID_COUNTRY_CODE = 27
    INVALID_CALL_CONVERSION_TYPE_ID = 28
    CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING = 29
    CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 30
    INVALID_APP_ID = 31
    QUOTES_IN_REVIEW_EXTENSION_SNIPPET = 32
    HYPHENS_IN_REVIEW_EXTENSION_SNIPPET = 33
    REVIEW_EXTENSION_SOURCE_NOT_ELIGIBLE = 34
    SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT = 35
    MISSING_FIELD = 36
    INCONSISTENT_CURRENCY_CODES = 37
    PRICE_EXTENSION_HAS_DUPLICATED_HEADERS = 38
    PRICE_ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION = 39
    PRICE_EXTENSION_HAS_TOO_FEW_ITEMS = 40
    PRICE_EXTENSION_HAS_TOO_MANY_ITEMS = 41
    UNSUPPORTED_VALUE = 42
    INVALID_DEVICE_PREFERENCE = 43
    INVALID_SCHEDULE_END = 45
    DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE = 47
    OVERLAPPING_SCHEDULES_NOT_ALLOWED = 48
    SCHEDULE_END_NOT_AFTER_START = 49
    TOO_MANY_SCHEDULES_PER_DAY = 50
    DUPLICATE_EXTENSION_FEED_ITEM_EDIT = 51
    INVALID_SNIPPETS_HEADER = 52
    PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY = 53
    CAMPAIGN_TARGETING_MISMATCH = 54
    CANNOT_OPERATE_ON_REMOVED_FEED = 55
    EXTENSION_TYPE_REQUIRED = 56
    INCOMPATIBLE_UNDERLYING_MATCHING_FUNCTION = 57
    START_DATE_AFTER_END_DATE = 58
    INVALID_PRICE_FORMAT = 59
    PROMOTION_INVALID_TIME = 60
    PROMOTION_CANNOT_SET_PERCENT_DISCOUNT_AND_MONEY_DISCOUNT = 61
    PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT = 62
    TOO_MANY_DECIMAL_PLACES_SPECIFIED = 63
    INVALID_LANGUAGE_CODE = 64
    UNSUPPORTED_LANGUAGE = 65
    CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED = 66
    EXTENSION_SETTING_UPDATE_IS_A_NOOP = 67
'''
# Swap class for instance so lookups go through __getattribute__.
ExtensionSettingErrorEnum = ExtensionSettingErrorEnum() # For __getattribute__
class ExtensionTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: the attribute below holds the enum's Python source as
    # a string. The _CreateEnumTypeUponFirstAccess base presumably exec's it to
    # build the real enum.IntEnum on first attribute access — NOTE(review):
    # confirm against the base class defined earlier in this file.
    ExtensionTypeEnum = '''\
class ExtensionType(enum.IntEnum):
    """
    Possible data types for an extension in an extension setting.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        NONE (int): None.
        APP (int): App.
        CALL (int): Call.
        CALLOUT (int): Callout.
        MESSAGE (int): Message.
        PRICE (int): Price.
        PROMOTION (int): Promotion.
        SITELINK (int): Sitelink.
        STRUCTURED_SNIPPET (int): Structured snippet.
        LOCATION (int): Location.
        AFFILIATE_LOCATION (int): Affiliate location.
        HOTEL_CALLOUT (int): Hotel callout
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NONE = 2
    APP = 3
    CALL = 4
    CALLOUT = 5
    MESSAGE = 6
    PRICE = 7
    PROMOTION = 8
    SITELINK = 10
    STRUCTURED_SNIPPET = 11
    LOCATION = 12
    AFFILIATE_LOCATION = 13
    HOTEL_CALLOUT = 15
'''
ExtensionTypeEnum = ExtensionTypeEnum() # For __getattribute__
class ExternalConversionSourceEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: the string below is the enum's source; the base class
    # presumably turns it into the real enum.IntEnum lazily, on first
    # attribute access — NOTE(review): confirm in _CreateEnumTypeUponFirstAccess.
    ExternalConversionSourceEnum = '''\
class ExternalConversionSource(enum.IntEnum):
    """
    The external conversion source that is associated with a ConversionAction.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Represents value unknown in this version.
        WEBPAGE (int): Conversion that occurs when a user navigates to a particular webpage
        after viewing an ad; Displayed in Google Ads UI as 'Website'.
        ANALYTICS (int): Conversion that comes from linked Google Analytics goal or transaction;
        Displayed in Google Ads UI as 'Analytics'.
        UPLOAD (int): Website conversion that is uploaded through ConversionUploadService;
        Displayed in Google Ads UI as 'Import from clicks'.
        AD_CALL_METRICS (int): Conversion that occurs when a user clicks on a call extension directly on
        an ad; Displayed in Google Ads UI as 'Calls from ads'.
        WEBSITE_CALL_METRICS (int): Conversion that occurs when a user calls a dynamically-generated phone
        number (by installed javascript) from an advertiser's website after
        clicking on an ad; Displayed in Google Ads UI as 'Calls from website'.
        STORE_VISITS (int): Conversion that occurs when a user visits an advertiser's retail store
        after clicking on a Google ad;
        Displayed in Google Ads UI as 'Store visits'.
        ANDROID_IN_APP (int): Conversion that occurs when a user takes an in-app action such as a
        purchase in an Android app;
        Displayed in Google Ads UI as 'Android in-app action'.
        IOS_IN_APP (int): Conversion that occurs when a user takes an in-app action such as a
        purchase in an iOS app;
        Displayed in Google Ads UI as 'iOS in-app action'.
        IOS_FIRST_OPEN (int): Conversion that occurs when a user opens an iOS app for the first time;
        Displayed in Google Ads UI as 'iOS app install (first open)'.
        APP_UNSPECIFIED (int): Legacy app conversions that do not have an AppPlatform provided;
        Displayed in Google Ads UI as 'Mobile app'.
        ANDROID_FIRST_OPEN (int): Conversion that occurs when a user opens an Android app for the first
        time; Displayed in Google Ads UI as 'Android app install (first open)'.
        UPLOAD_CALLS (int): Call conversion that is uploaded through ConversionUploadService;
        Displayed in Google Ads UI as 'Import from calls'.
        FIREBASE (int): Conversion that comes from a linked Firebase event;
        Displayed in Google Ads UI as 'Firebase'.
        CLICK_TO_CALL (int): Conversion that occurs when a user clicks on a mobile phone number;
        Displayed in Google Ads UI as 'Phone number clicks'.
        SALESFORCE (int): Conversion that comes from Salesforce;
        Displayed in Google Ads UI as 'Salesforce.com'.
        STORE_SALES_CRM (int): Conversion that comes from in-store purchases recorded by CRM;
        Displayed in Google Ads UI as 'Store sales (data partner)'.
        STORE_SALES_PAYMENT_NETWORK (int): Conversion that comes from in-store purchases from payment network;
        Displayed in Google Ads UI as 'Store sales (payment network)'.
        GOOGLE_PLAY (int): Codeless Google Play conversion;
        Displayed in Google Ads UI as 'Google Play'.
        THIRD_PARTY_APP_ANALYTICS (int): Conversion that comes from a linked third-party app analytics event;
        Displayed in Google Ads UI as 'Third-party app analytics'.
        GOOGLE_ATTRIBUTION (int): Conversion that is controlled by Google Attribution.
        STORE_SALES_DIRECT (int): Store Sales conversion based on first-party or third-party merchant data
        uploads. Displayed in Google Ads UI as 'Store sales (direct)'.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    WEBPAGE = 2
    ANALYTICS = 3
    UPLOAD = 4
    AD_CALL_METRICS = 5
    WEBSITE_CALL_METRICS = 6
    STORE_VISITS = 7
    ANDROID_IN_APP = 8
    IOS_IN_APP = 9
    IOS_FIRST_OPEN = 10
    APP_UNSPECIFIED = 11
    ANDROID_FIRST_OPEN = 12
    UPLOAD_CALLS = 13
    FIREBASE = 14
    CLICK_TO_CALL = 15
    SALESFORCE = 16
    STORE_SALES_CRM = 17
    STORE_SALES_PAYMENT_NETWORK = 18
    GOOGLE_PLAY = 19
    THIRD_PARTY_APP_ANALYTICS = 20
    GOOGLE_ATTRIBUTION = 21
    STORE_SALES_DIRECT = 22
'''
ExternalConversionSourceEnum = ExternalConversionSourceEnum() # For __getattribute__
class FeedAttributeOperation(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper holding enum source as a string; the base class
    # presumably exec's it lazily on first attribute access — confirm in
    # _CreateEnumTypeUponFirstAccess. Note the inner class is named
    # "Operator", not "FeedAttributeOperation" — generated as-is.
    FeedAttributeOperation = '''\
class Operator(enum.IntEnum):
    """
    The operator.
    Attributes:
        UNSPECIFIED (int): Unspecified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        ADD (int): Add the attribute to the existing attributes.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ADD = 2
'''
FeedAttributeOperation = FeedAttributeOperation() # For __getattribute__
class FeedAttributeReferenceErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source kept as a string and presumably built
    # into the real enum.IntEnum on first attribute access by the base class
    # — NOTE(review): confirm in _CreateEnumTypeUponFirstAccess.
    FeedAttributeReferenceErrorEnum = '''\
class FeedAttributeReferenceError(enum.IntEnum):
    """
    Enum describing possible feed attribute reference errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        CANNOT_REFERENCE_REMOVED_FEED (int): A feed referenced by ID has been removed.
        INVALID_FEED_NAME (int): There is no enabled feed with the given name.
        INVALID_FEED_ATTRIBUTE_NAME (int): There is no feed attribute in an enabled feed with the given name.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CANNOT_REFERENCE_REMOVED_FEED = 2
    INVALID_FEED_NAME = 3
    INVALID_FEED_ATTRIBUTE_NAME = 4
'''
FeedAttributeReferenceErrorEnum = FeedAttributeReferenceErrorEnum() # For __getattribute__
class FeedAttributeTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: the string below is the enum's source; the base class
    # presumably exec's it to create the real enum.IntEnum on first attribute
    # access — NOTE(review): confirm in _CreateEnumTypeUponFirstAccess.
    FeedAttributeTypeEnum = '''\
class FeedAttributeType(enum.IntEnum):
    """
    Possible data types for a feed attribute.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        INT64 (int): Int64.
        DOUBLE (int): Double.
        STRING (int): String.
        BOOLEAN (int): Boolean.
        URL (int): Url.
        DATE_TIME (int): Datetime.
        INT64_LIST (int): Int64 list.
        DOUBLE_LIST (int): Double (8 bytes) list.
        STRING_LIST (int): String list.
        BOOLEAN_LIST (int): Boolean list.
        URL_LIST (int): Url list.
        DATE_TIME_LIST (int): Datetime list.
        PRICE (int): Price.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INT64 = 2
    DOUBLE = 3
    STRING = 4
    BOOLEAN = 5
    URL = 6
    DATE_TIME = 7
    INT64_LIST = 8
    DOUBLE_LIST = 9
    STRING_LIST = 10
    BOOLEAN_LIST = 11
    URL_LIST = 12
    DATE_TIME_LIST = 13
    PRICE = 14
'''
FeedAttributeTypeEnum = FeedAttributeTypeEnum() # For __getattribute__
class FeedErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string, presumably exec'd lazily by
    # the base class on first attribute access — NOTE(review): confirm in
    # _CreateEnumTypeUponFirstAccess defined earlier in this file.
    FeedErrorEnum = '''\
class FeedError(enum.IntEnum):
    """
    Enum describing possible feed errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        ATTRIBUTE_NAMES_NOT_UNIQUE (int): The names of the FeedAttributes must be unique.
        ATTRIBUTES_DO_NOT_MATCH_EXISTING_ATTRIBUTES (int): The attribute list must be an exact copy of the existing list if the
    attribute ID's are present.
        CANNOT_SPECIFY_USER_ORIGIN_FOR_SYSTEM_FEED (int): Cannot specify USER origin for a system generated feed.
        CANNOT_SPECIFY_GOOGLE_ORIGIN_FOR_NON_SYSTEM_FEED (int): Cannot specify GOOGLE origin for a non-system generated feed.
        CANNOT_SPECIFY_FEED_ATTRIBUTES_FOR_SYSTEM_FEED (int): Cannot specify feed attributes for system feed.
        CANNOT_UPDATE_FEED_ATTRIBUTES_WITH_ORIGIN_GOOGLE (int): Cannot update FeedAttributes on feed with origin GOOGLE.
        FEED_REMOVED (int): The given ID refers to a removed Feed. Removed Feeds are immutable.
        INVALID_ORIGIN_VALUE (int): The origin of the feed is not valid for the client.
        FEED_ORIGIN_IS_NOT_USER (int): A user can only create and modify feeds with USER origin.
        INVALID_AUTH_TOKEN_FOR_EMAIL (int): Invalid auth token for the given email.
        INVALID_EMAIL (int): Invalid email specified.
        DUPLICATE_FEED_NAME (int): Feed name matches that of another active Feed.
        INVALID_FEED_NAME (int): Name of feed is not allowed.
        MISSING_OAUTH_INFO (int): Missing OAuthInfo.
        NEW_ATTRIBUTE_CANNOT_BE_PART_OF_UNIQUE_KEY (int): New FeedAttributes must not affect the unique key.
        TOO_MANY_ATTRIBUTES (int): Too many FeedAttributes for a Feed.
        INVALID_BUSINESS_ACCOUNT (int): The business account is not valid.
        BUSINESS_ACCOUNT_CANNOT_ACCESS_LOCATION_ACCOUNT (int): Business account cannot access Google My Business account.
        INVALID_AFFILIATE_CHAIN_ID (int): Invalid chain ID provided for affiliate location feed.
        DUPLICATE_SYSTEM_FEED (int): There is already a feed with the given system feed generation data.
        GMB_ACCESS_ERROR (int): An error occurred accessing GMB account.
        CANNOT_HAVE_LOCATION_AND_AFFILIATE_LOCATION_FEEDS (int): A customer cannot have both LOCATION and AFFILIATE\_LOCATION feeds.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ATTRIBUTE_NAMES_NOT_UNIQUE = 2
    ATTRIBUTES_DO_NOT_MATCH_EXISTING_ATTRIBUTES = 3
    CANNOT_SPECIFY_USER_ORIGIN_FOR_SYSTEM_FEED = 4
    CANNOT_SPECIFY_GOOGLE_ORIGIN_FOR_NON_SYSTEM_FEED = 5
    CANNOT_SPECIFY_FEED_ATTRIBUTES_FOR_SYSTEM_FEED = 6
    CANNOT_UPDATE_FEED_ATTRIBUTES_WITH_ORIGIN_GOOGLE = 7
    FEED_REMOVED = 8
    INVALID_ORIGIN_VALUE = 9
    FEED_ORIGIN_IS_NOT_USER = 10
    INVALID_AUTH_TOKEN_FOR_EMAIL = 11
    INVALID_EMAIL = 12
    DUPLICATE_FEED_NAME = 13
    INVALID_FEED_NAME = 14
    MISSING_OAUTH_INFO = 15
    NEW_ATTRIBUTE_CANNOT_BE_PART_OF_UNIQUE_KEY = 16
    TOO_MANY_ATTRIBUTES = 17
    INVALID_BUSINESS_ACCOUNT = 18
    BUSINESS_ACCOUNT_CANNOT_ACCESS_LOCATION_ACCOUNT = 19
    INVALID_AFFILIATE_CHAIN_ID = 20
    DUPLICATE_SYSTEM_FEED = 21
    GMB_ACCESS_ERROR = 22
    CANNOT_HAVE_LOCATION_AND_AFFILIATE_LOCATION_FEEDS = 23
'''
FeedErrorEnum = FeedErrorEnum() # For __getattribute__
class FeedItemErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string, presumably built into the
    # real enum.IntEnum on first attribute access by the base class —
    # NOTE(review): confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemErrorEnum = '''\
class FeedItemError(enum.IntEnum):
    """
    Enum describing possible feed item errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        CANNOT_CONVERT_ATTRIBUTE_VALUE_FROM_STRING (int): Cannot convert the feed attribute value from string to its real type.
        CANNOT_OPERATE_ON_REMOVED_FEED_ITEM (int): Cannot operate on removed feed item.
        DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE (int): Date time zone does not match the account's time zone.
        KEY_ATTRIBUTES_NOT_FOUND (int): Feed item with the key attributes could not be found.
        INVALID_URL (int): Url feed attribute value is not valid.
        MISSING_KEY_ATTRIBUTES (int): Some key attributes are missing.
        KEY_ATTRIBUTES_NOT_UNIQUE (int): Feed item has same key attributes as another feed item.
        CANNOT_MODIFY_KEY_ATTRIBUTE_VALUE (int): Cannot modify key attributes on an existing feed item.
        SIZE_TOO_LARGE_FOR_MULTI_VALUE_ATTRIBUTE (int): The feed attribute value is too large.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CANNOT_CONVERT_ATTRIBUTE_VALUE_FROM_STRING = 2
    CANNOT_OPERATE_ON_REMOVED_FEED_ITEM = 3
    DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE = 4
    KEY_ATTRIBUTES_NOT_FOUND = 5
    INVALID_URL = 6
    MISSING_KEY_ATTRIBUTES = 7
    KEY_ATTRIBUTES_NOT_UNIQUE = 8
    CANNOT_MODIFY_KEY_ATTRIBUTE_VALUE = 9
    SIZE_TOO_LARGE_FOR_MULTI_VALUE_ATTRIBUTE = 10
'''
FeedItemErrorEnum = FeedItemErrorEnum() # For __getattribute__
class FeedItemQualityApprovalStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source kept as a string; presumably exec'd
    # lazily by the base class on first attribute access — confirm in
    # _CreateEnumTypeUponFirstAccess.
    FeedItemQualityApprovalStatusEnum = '''\
class FeedItemQualityApprovalStatus(enum.IntEnum):
    """
    The possible quality evaluation approval statuses of a feed item.
    Attributes:
        UNSPECIFIED (int): No value has been specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        APPROVED (int): Meets all quality expectations.
        DISAPPROVED (int): Does not meet some quality expectations. The specific reason is found in
        the quality\_disapproval\_reasons field.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    APPROVED = 2
    DISAPPROVED = 3
'''
FeedItemQualityApprovalStatusEnum = FeedItemQualityApprovalStatusEnum() # For __getattribute__
class FeedItemQualityDisapprovalReasonEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: the string below is the enum's source; the base class
    # presumably builds the real enum.IntEnum from it on first attribute
    # access — NOTE(review): confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemQualityDisapprovalReasonEnum = '''\
class FeedItemQualityDisapprovalReason(enum.IntEnum):
    """
    The possible quality evaluation disapproval reasons of a feed item.
    Attributes:
        UNSPECIFIED (int): No value has been specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        PRICE_TABLE_REPETITIVE_HEADERS (int): Price contains repetitive headers.
        PRICE_TABLE_REPETITIVE_DESCRIPTION (int): Price contains repetitive description.
        PRICE_TABLE_INCONSISTENT_ROWS (int): Price contains inconsistent items.
        PRICE_DESCRIPTION_HAS_PRICE_QUALIFIERS (int): Price contains qualifiers in description.
        PRICE_UNSUPPORTED_LANGUAGE (int): Price contains an unsupported language.
        PRICE_TABLE_ROW_HEADER_TABLE_TYPE_MISMATCH (int): Price item header is not relevant to the price type.
        PRICE_TABLE_ROW_HEADER_HAS_PROMOTIONAL_TEXT (int): Price item header has promotional text.
        PRICE_TABLE_ROW_DESCRIPTION_NOT_RELEVANT (int): Price item description is not relevant to the item header.
        PRICE_TABLE_ROW_DESCRIPTION_HAS_PROMOTIONAL_TEXT (int): Price item description contains promotional text.
        PRICE_TABLE_ROW_HEADER_DESCRIPTION_REPETITIVE (int): Price item header and description are repetitive.
        PRICE_TABLE_ROW_UNRATEABLE (int): Price item is in a foreign language, nonsense, or can't be rated.
        PRICE_TABLE_ROW_PRICE_INVALID (int): Price item price is invalid or inaccurate.
        PRICE_TABLE_ROW_URL_INVALID (int): Price item URL is invalid or irrelevant.
        PRICE_HEADER_OR_DESCRIPTION_HAS_PRICE (int): Price item header or description has price.
        STRUCTURED_SNIPPETS_HEADER_POLICY_VIOLATED (int): Structured snippet values do not match the header.
        STRUCTURED_SNIPPETS_REPEATED_VALUES (int): Structured snippet values are repeated.
        STRUCTURED_SNIPPETS_EDITORIAL_GUIDELINES (int): Structured snippet values violate editorial guidelines like punctuation.
        STRUCTURED_SNIPPETS_HAS_PROMOTIONAL_TEXT (int): Structured snippet contain promotional text.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PRICE_TABLE_REPETITIVE_HEADERS = 2
    PRICE_TABLE_REPETITIVE_DESCRIPTION = 3
    PRICE_TABLE_INCONSISTENT_ROWS = 4
    PRICE_DESCRIPTION_HAS_PRICE_QUALIFIERS = 5
    PRICE_UNSUPPORTED_LANGUAGE = 6
    PRICE_TABLE_ROW_HEADER_TABLE_TYPE_MISMATCH = 7
    PRICE_TABLE_ROW_HEADER_HAS_PROMOTIONAL_TEXT = 8
    PRICE_TABLE_ROW_DESCRIPTION_NOT_RELEVANT = 9
    PRICE_TABLE_ROW_DESCRIPTION_HAS_PROMOTIONAL_TEXT = 10
    PRICE_TABLE_ROW_HEADER_DESCRIPTION_REPETITIVE = 11
    PRICE_TABLE_ROW_UNRATEABLE = 12
    PRICE_TABLE_ROW_PRICE_INVALID = 13
    PRICE_TABLE_ROW_URL_INVALID = 14
    PRICE_HEADER_OR_DESCRIPTION_HAS_PRICE = 15
    STRUCTURED_SNIPPETS_HEADER_POLICY_VIOLATED = 16
    STRUCTURED_SNIPPETS_REPEATED_VALUES = 17
    STRUCTURED_SNIPPETS_EDITORIAL_GUIDELINES = 18
    STRUCTURED_SNIPPETS_HAS_PROMOTIONAL_TEXT = 19
'''
FeedItemQualityDisapprovalReasonEnum = FeedItemQualityDisapprovalReasonEnum() # For __getattribute__
class FeedItemStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string, presumably exec'd lazily on
    # first attribute access — confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemStatusEnum = '''\
class FeedItemStatus(enum.IntEnum):
    """
    Possible statuses of a feed item.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        ENABLED (int): Feed item is enabled.
        REMOVED (int): Feed item has been removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
FeedItemStatusEnum = FeedItemStatusEnum() # For __getattribute__
class FeedItemTargetDeviceEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string, presumably exec'd lazily on
    # first attribute access — confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemTargetDeviceEnum = '''\
class FeedItemTargetDevice(enum.IntEnum):
    """
    Possible data types for a feed item target device.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        MOBILE (int): Mobile.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    MOBILE = 2
'''
FeedItemTargetDeviceEnum = FeedItemTargetDeviceEnum() # For __getattribute__
class FeedItemTargetErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string; the base class presumably
    # builds the real enum.IntEnum from it on first attribute access —
    # NOTE(review): confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemTargetErrorEnum = '''\
class FeedItemTargetError(enum.IntEnum):
    """
    Enum describing possible feed item target errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        MUST_SET_TARGET_ONEOF_ON_CREATE (int): On CREATE, the FeedItemTarget must have a populated field in the oneof
        target.
        FEED_ITEM_TARGET_ALREADY_EXISTS (int): The specified feed item target already exists, so it cannot be added.
        FEED_ITEM_SCHEDULES_CANNOT_OVERLAP (int): The schedules for a given feed item cannot overlap.
        TARGET_LIMIT_EXCEEDED_FOR_GIVEN_TYPE (int): Too many targets of a given type were added for a single feed item.
        TOO_MANY_SCHEDULES_PER_DAY (int): Too many AdSchedules are enabled for the feed item for the given day.
        CANNOT_HAVE_ENABLED_CAMPAIGN_AND_ENABLED_AD_GROUP_TARGETS (int): A feed item may either have an enabled campaign target or an enabled ad
        group target.
        DUPLICATE_AD_SCHEDULE (int): Duplicate ad schedules aren't allowed.
        DUPLICATE_KEYWORD (int): Duplicate keywords aren't allowed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    MUST_SET_TARGET_ONEOF_ON_CREATE = 2
    FEED_ITEM_TARGET_ALREADY_EXISTS = 3
    FEED_ITEM_SCHEDULES_CANNOT_OVERLAP = 4
    TARGET_LIMIT_EXCEEDED_FOR_GIVEN_TYPE = 5
    TOO_MANY_SCHEDULES_PER_DAY = 6
    CANNOT_HAVE_ENABLED_CAMPAIGN_AND_ENABLED_AD_GROUP_TARGETS = 7
    DUPLICATE_AD_SCHEDULE = 8
    DUPLICATE_KEYWORD = 9
'''
FeedItemTargetErrorEnum = FeedItemTargetErrorEnum() # For __getattribute__
class FeedItemTargetStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string, presumably exec'd lazily on
    # first attribute access — confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemTargetStatusEnum = '''\
class FeedItemTargetStatus(enum.IntEnum):
    """
    Possible statuses of a feed item target.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        ENABLED (int): Feed item target is enabled.
        REMOVED (int): Feed item target has been removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
FeedItemTargetStatusEnum = FeedItemTargetStatusEnum() # For __getattribute__
class FeedItemTargetTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string, presumably exec'd lazily on
    # first attribute access — confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemTargetTypeEnum = '''\
class FeedItemTargetType(enum.IntEnum):
    """
    Possible type of a feed item target.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        CAMPAIGN (int): Feed item targets a campaign.
        AD_GROUP (int): Feed item targets an ad group.
        CRITERION (int): Feed item targets a criterion.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CAMPAIGN = 2
    AD_GROUP = 3
    CRITERION = 4
'''
FeedItemTargetTypeEnum = FeedItemTargetTypeEnum() # For __getattribute__
class FeedItemValidationErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: the string below is the enum's source; the base class
    # presumably exec's it to create the real enum.IntEnum on first attribute
    # access — NOTE(review): confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemValidationErrorEnum = '''\
class FeedItemValidationError(enum.IntEnum):
    """
    The possible validation errors of a feed item.
    Attributes:
        UNSPECIFIED (int): No value has been specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        STRING_TOO_SHORT (int): String is too short.
        STRING_TOO_LONG (int): String is too long.
        VALUE_NOT_SPECIFIED (int): Value is not provided.
        INVALID_DOMESTIC_PHONE_NUMBER_FORMAT (int): Phone number format is invalid for region.
        INVALID_PHONE_NUMBER (int): String does not represent a phone number.
        PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY (int): Phone number format is not compatible with country code.
        PREMIUM_RATE_NUMBER_NOT_ALLOWED (int): Premium rate number is not allowed.
        DISALLOWED_NUMBER_TYPE (int): Phone number type is not allowed.
        VALUE_OUT_OF_RANGE (int): Specified value is outside of the valid range.
        CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY (int): Call tracking is not supported in the selected country.
        CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING (int): Customer is not whitelisted for call tracking.
        INVALID_COUNTRY_CODE (int): Country code is invalid.
        INVALID_APP_ID (int): The specified mobile app id is invalid.
        MISSING_ATTRIBUTES_FOR_FIELDS (int): Some required field attributes are missing.
        INVALID_TYPE_ID (int): Invalid email button type for email extension.
        INVALID_EMAIL_ADDRESS (int): Email address is invalid.
        INVALID_HTTPS_URL (int): The HTTPS URL in email extension is invalid.
        MISSING_DELIVERY_ADDRESS (int): Delivery address is missing from email extension.
        START_DATE_AFTER_END_DATE (int): FeedItem scheduling start date comes after end date.
        MISSING_FEED_ITEM_START_TIME (int): FeedItem scheduling start time is missing.
        MISSING_FEED_ITEM_END_TIME (int): FeedItem scheduling end time is missing.
        MISSING_FEED_ITEM_ID (int): Cannot compute system attributes on a FeedItem that has no FeedItemId.
        VANITY_PHONE_NUMBER_NOT_ALLOWED (int): Call extension vanity phone numbers are not supported.
        INVALID_REVIEW_EXTENSION_SNIPPET (int): Invalid review text.
        INVALID_NUMBER_FORMAT (int): Invalid format for numeric value in ad parameter.
        INVALID_DATE_FORMAT (int): Invalid format for date value in ad parameter.
        INVALID_PRICE_FORMAT (int): Invalid format for price value in ad parameter.
        UNKNOWN_PLACEHOLDER_FIELD (int): Unrecognized type given for value in ad parameter.
        MISSING_ENHANCED_SITELINK_DESCRIPTION_LINE (int): Enhanced sitelinks must have both description lines specified.
        REVIEW_EXTENSION_SOURCE_INELIGIBLE (int): Review source is ineligible.
        HYPHENS_IN_REVIEW_EXTENSION_SNIPPET (int): Review text cannot contain hyphens or dashes.
        DOUBLE_QUOTES_IN_REVIEW_EXTENSION_SNIPPET (int): Review text cannot contain double quote characters.
        QUOTES_IN_REVIEW_EXTENSION_SNIPPET (int): Review text cannot contain quote characters.
        INVALID_FORM_ENCODED_PARAMS (int): Parameters are encoded in the wrong format.
        INVALID_URL_PARAMETER_NAME (int): URL parameter name must contain only letters, numbers, underscores, and
        dashes.
        NO_GEOCODING_RESULT (int): Cannot find address location.
        SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT (int): Review extension text has source name.
        CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED (int): Some phone numbers can be shorter than usual. Some of these short numbers
        are carrier-specific, and we disallow those in ad extensions because they
        will not be available to all users.
        INVALID_PLACEHOLDER_FIELD_ID (int): Triggered when a request references a placeholder field id that does not
        exist.
        INVALID_URL_TAG (int): URL contains invalid ValueTrack tags or format.
        LIST_TOO_LONG (int): Provided list exceeds acceptable size.
        INVALID_ATTRIBUTES_COMBINATION (int): Certain combinations of attributes aren't allowed to be specified in the
        same feed item.
        DUPLICATE_VALUES (int): An attribute has the same value repeatedly.
        INVALID_CALL_CONVERSION_ACTION_ID (int): Advertisers can link a conversion action with a phone number to indicate
        that sufficiently long calls forwarded to that phone number should be
        counted as conversions of the specified type. This is an error message
        indicating that the conversion action specified is invalid (e.g., the
        conversion action does not exist within the appropriate Google Ads
        account, or it is a type of conversion not appropriate to phone call
        conversions).
        CANNOT_SET_WITHOUT_FINAL_URLS (int): Tracking template requires final url to be set.
        APP_ID_DOESNT_EXIST_IN_APP_STORE (int): An app id was provided that doesn't exist in the given app store.
        INVALID_FINAL_URL (int): Invalid U2 final url.
        INVALID_TRACKING_URL (int): Invalid U2 tracking url.
        INVALID_FINAL_URL_FOR_APP_DOWNLOAD_URL (int): Final URL should start from App download URL.
        LIST_TOO_SHORT (int): List provided is too short.
        INVALID_USER_ACTION (int): User Action field has invalid value.
        INVALID_TYPE_NAME (int): Type field has invalid value.
        INVALID_EVENT_CHANGE_STATUS (int): Change status for event is invalid.
        INVALID_SNIPPETS_HEADER (int): The header of a structured snippets extension is not one of the valid
        headers.
        INVALID_ANDROID_APP_LINK (int): Android app link is not formatted correctly
        NUMBER_TYPE_WITH_CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY (int): Phone number incompatible with call tracking for country.
        RESERVED_KEYWORD_OTHER (int): The input is identical to a reserved keyword
        DUPLICATE_OPTION_LABELS (int): Each option label in the message extension must be unique.
        DUPLICATE_OPTION_PREFILLS (int): Each option prefill in the message extension must be unique.
        UNEQUAL_LIST_LENGTHS (int): In message extensions, the number of optional labels and optional
        prefills must be the same.
        INCONSISTENT_CURRENCY_CODES (int): All currency codes in an ad extension must be the same.
        PRICE_EXTENSION_HAS_DUPLICATED_HEADERS (int): Headers in price extension are not unique.
        ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION (int): Header and description in an item are the same.
        PRICE_EXTENSION_HAS_TOO_FEW_ITEMS (int): Price extension has too few items.
        UNSUPPORTED_VALUE (int): The given value is not supported.
        INVALID_FINAL_MOBILE_URL (int): Invalid final mobile url.
        INVALID_KEYWORDLESS_AD_RULE_LABEL (int): The given string value of Label contains invalid characters
        VALUE_TRACK_PARAMETER_NOT_SUPPORTED (int): The given URL contains value track parameters.
        UNSUPPORTED_VALUE_IN_SELECTED_LANGUAGE (int): The given value is not supported in the selected language of an
        extension.
        INVALID_IOS_APP_LINK (int): The iOS app link is not formatted correctly.
        MISSING_IOS_APP_LINK_OR_IOS_APP_STORE_ID (int): iOS app link or iOS app store id is missing.
        PROMOTION_INVALID_TIME (int): Promotion time is invalid.
        PROMOTION_CANNOT_SET_PERCENT_OFF_AND_MONEY_AMOUNT_OFF (int): Both the percent off and money amount off fields are set.
        PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT (int): Both the promotion code and orders over amount fields are set.
        TOO_MANY_DECIMAL_PLACES_SPECIFIED (int): Too many decimal places are specified.
        AD_CUSTOMIZERS_NOT_ALLOWED (int): Ad Customizers are present and not allowed.
        INVALID_LANGUAGE_CODE (int): Language code is not valid.
        UNSUPPORTED_LANGUAGE (int): Language is not supported.
        IF_FUNCTION_NOT_ALLOWED (int): IF Function is present and not allowed.
        INVALID_FINAL_URL_SUFFIX (int): Final url suffix is not valid.
        INVALID_TAG_IN_FINAL_URL_SUFFIX (int): Final url suffix contains an invalid tag.
        INVALID_FINAL_URL_SUFFIX_FORMAT (int): Final url suffix is formatted incorrectly.
        CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED (int): Consent for call recording, which is required for the use of call
        extensions, was not provided by the advertiser. Please see
        https://support.google.com/google-ads/answer/7412639.
        ONLY_ONE_DELIVERY_OPTION_IS_ALLOWED (int): Multiple message delivery options are set.
        NO_DELIVERY_OPTION_IS_SET (int): No message delivery option is set.
        INVALID_CONVERSION_REPORTING_STATE (int): String value of conversion reporting state field is not valid.
        IMAGE_SIZE_WRONG (int): Image size is not right.
        EMAIL_DELIVERY_NOT_AVAILABLE_IN_COUNTRY (int): Email delivery is not supported in the country specified in the country
        code field.
        AUTO_REPLY_NOT_AVAILABLE_IN_COUNTRY (int): Auto reply is not supported in the country specified in the country code
        field.
        INVALID_LATITUDE_VALUE (int): Invalid value specified for latitude.
        INVALID_LONGITUDE_VALUE (int): Invalid value specified for longitude.
        TOO_MANY_LABELS (int): Too many label fields provided.
        INVALID_IMAGE_URL (int): Invalid image url.
        MISSING_LATITUDE_VALUE (int): Latitude value is missing.
        MISSING_LONGITUDE_VALUE (int): Longitude value is missing.
        ADDRESS_NOT_FOUND (int): Unable to find address.
        ADDRESS_NOT_TARGETABLE (int): Cannot target provided address.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    STRING_TOO_SHORT = 2
    STRING_TOO_LONG = 3
    VALUE_NOT_SPECIFIED = 4
    INVALID_DOMESTIC_PHONE_NUMBER_FORMAT = 5
    INVALID_PHONE_NUMBER = 6
    PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY = 7
    PREMIUM_RATE_NUMBER_NOT_ALLOWED = 8
    DISALLOWED_NUMBER_TYPE = 9
    VALUE_OUT_OF_RANGE = 10
    CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 11
    CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING = 12
    INVALID_COUNTRY_CODE = 13
    INVALID_APP_ID = 14
    MISSING_ATTRIBUTES_FOR_FIELDS = 15
    INVALID_TYPE_ID = 16
    INVALID_EMAIL_ADDRESS = 17
    INVALID_HTTPS_URL = 18
    MISSING_DELIVERY_ADDRESS = 19
    START_DATE_AFTER_END_DATE = 20
    MISSING_FEED_ITEM_START_TIME = 21
    MISSING_FEED_ITEM_END_TIME = 22
    MISSING_FEED_ITEM_ID = 23
    VANITY_PHONE_NUMBER_NOT_ALLOWED = 24
    INVALID_REVIEW_EXTENSION_SNIPPET = 25
    INVALID_NUMBER_FORMAT = 26
    INVALID_DATE_FORMAT = 27
    INVALID_PRICE_FORMAT = 28
    UNKNOWN_PLACEHOLDER_FIELD = 29
    MISSING_ENHANCED_SITELINK_DESCRIPTION_LINE = 30
    REVIEW_EXTENSION_SOURCE_INELIGIBLE = 31
    HYPHENS_IN_REVIEW_EXTENSION_SNIPPET = 32
    DOUBLE_QUOTES_IN_REVIEW_EXTENSION_SNIPPET = 33
    QUOTES_IN_REVIEW_EXTENSION_SNIPPET = 34
    INVALID_FORM_ENCODED_PARAMS = 35
    INVALID_URL_PARAMETER_NAME = 36
    NO_GEOCODING_RESULT = 37
    SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT = 38
    CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED = 39
    INVALID_PLACEHOLDER_FIELD_ID = 40
    INVALID_URL_TAG = 41
    LIST_TOO_LONG = 42
    INVALID_ATTRIBUTES_COMBINATION = 43
    DUPLICATE_VALUES = 44
    INVALID_CALL_CONVERSION_ACTION_ID = 45
    CANNOT_SET_WITHOUT_FINAL_URLS = 46
    APP_ID_DOESNT_EXIST_IN_APP_STORE = 47
    INVALID_FINAL_URL = 48
    INVALID_TRACKING_URL = 49
    INVALID_FINAL_URL_FOR_APP_DOWNLOAD_URL = 50
    LIST_TOO_SHORT = 51
    INVALID_USER_ACTION = 52
    INVALID_TYPE_NAME = 53
    INVALID_EVENT_CHANGE_STATUS = 54
    INVALID_SNIPPETS_HEADER = 55
    INVALID_ANDROID_APP_LINK = 56
    NUMBER_TYPE_WITH_CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY = 57
    RESERVED_KEYWORD_OTHER = 58
    DUPLICATE_OPTION_LABELS = 59
    DUPLICATE_OPTION_PREFILLS = 60
    UNEQUAL_LIST_LENGTHS = 61
    INCONSISTENT_CURRENCY_CODES = 62
    PRICE_EXTENSION_HAS_DUPLICATED_HEADERS = 63
    ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION = 64
    PRICE_EXTENSION_HAS_TOO_FEW_ITEMS = 65
    UNSUPPORTED_VALUE = 66
    INVALID_FINAL_MOBILE_URL = 67
    INVALID_KEYWORDLESS_AD_RULE_LABEL = 68
    VALUE_TRACK_PARAMETER_NOT_SUPPORTED = 69
    UNSUPPORTED_VALUE_IN_SELECTED_LANGUAGE = 70
    INVALID_IOS_APP_LINK = 71
    MISSING_IOS_APP_LINK_OR_IOS_APP_STORE_ID = 72
    PROMOTION_INVALID_TIME = 73
    PROMOTION_CANNOT_SET_PERCENT_OFF_AND_MONEY_AMOUNT_OFF = 74
    PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT = 75
    TOO_MANY_DECIMAL_PLACES_SPECIFIED = 76
    AD_CUSTOMIZERS_NOT_ALLOWED = 77
    INVALID_LANGUAGE_CODE = 78
    UNSUPPORTED_LANGUAGE = 79
    IF_FUNCTION_NOT_ALLOWED = 80
    INVALID_FINAL_URL_SUFFIX = 81
    INVALID_TAG_IN_FINAL_URL_SUFFIX = 82
    INVALID_FINAL_URL_SUFFIX_FORMAT = 83
    CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED = 84
    ONLY_ONE_DELIVERY_OPTION_IS_ALLOWED = 85
    NO_DELIVERY_OPTION_IS_SET = 86
    INVALID_CONVERSION_REPORTING_STATE = 87
    IMAGE_SIZE_WRONG = 88
    EMAIL_DELIVERY_NOT_AVAILABLE_IN_COUNTRY = 89
    AUTO_REPLY_NOT_AVAILABLE_IN_COUNTRY = 90
    INVALID_LATITUDE_VALUE = 91
    INVALID_LONGITUDE_VALUE = 92
    TOO_MANY_LABELS = 93
    INVALID_IMAGE_URL = 94
    MISSING_LATITUDE_VALUE = 95
    MISSING_LONGITUDE_VALUE = 96
    ADDRESS_NOT_FOUND = 97
    ADDRESS_NOT_TARGETABLE = 98
'''
FeedItemValidationErrorEnum = FeedItemValidationErrorEnum() # For __getattribute__
class FeedItemValidationStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string, presumably exec'd lazily on
    # first attribute access — confirm in _CreateEnumTypeUponFirstAccess.
    FeedItemValidationStatusEnum = '''\
class FeedItemValidationStatus(enum.IntEnum):
    """
    The possible validation statuses of a feed item.
    Attributes:
        UNSPECIFIED (int): No value has been specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        PENDING (int): Validation pending.
        INVALID (int): An error was found.
        VALID (int): Feed item is semantically well-formed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PENDING = 2
    INVALID = 3
    VALID = 4
'''
FeedItemValidationStatusEnum = FeedItemValidationStatusEnum() # For __getattribute__
class FeedLinkStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Generated wrapper: enum source as a string, presumably exec'd lazily on
    # first attribute access — confirm in _CreateEnumTypeUponFirstAccess.
    FeedLinkStatusEnum = '''\
class FeedLinkStatus(enum.IntEnum):
    """
    Possible statuses of a feed link.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        ENABLED (int): Feed link is enabled.
        REMOVED (int): Feed link has been removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
FeedLinkStatusEnum = FeedLinkStatusEnum() # For __getattribute__
class FeedMappingCriterionTypeEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FeedMappingCriterionType IntEnum; presumably
    # exec'd lazily by the _CreateEnumTypeUponFirstAccess base on first
    # attribute access. NOTE: member values 4 and 3 are intentionally out of
    # declaration order -- they come from the generated API proto; keep as-is.
    FeedMappingCriterionTypeEnum = '''\
class FeedMappingCriterionType(enum.IntEnum):
    """
    Possible placeholder types for a feed mapping.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      LOCATION_EXTENSION_TARGETING (int): Allows campaign targeting at locations within a location feed.
      DSA_PAGE_FEED (int): Allows url targeting for your dynamic search ads within a page feed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LOCATION_EXTENSION_TARGETING = 4
    DSA_PAGE_FEED = 3
'''
FeedMappingCriterionTypeEnum = FeedMappingCriterionTypeEnum() # For __getattribute__
class FeedMappingErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FeedMappingError IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute
    # access. Note there is no member with value 6 -- gap comes from the API.
    FeedMappingErrorEnum = '''\
class FeedMappingError(enum.IntEnum):
    """
    Enum describing possible feed item errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      INVALID_PLACEHOLDER_FIELD (int): The given placeholder field does not exist.
      INVALID_CRITERION_FIELD (int): The given criterion field does not exist.
      INVALID_PLACEHOLDER_TYPE (int): The given placeholder type does not exist.
      INVALID_CRITERION_TYPE (int): The given criterion type does not exist.
      NO_ATTRIBUTE_FIELD_MAPPINGS (int): A feed mapping must contain at least one attribute field mapping.
      FEED_ATTRIBUTE_TYPE_MISMATCH (int): The type of the feed attribute referenced in the attribute field mapping
      must match the type of the placeholder field.
      CANNOT_OPERATE_ON_MAPPINGS_FOR_SYSTEM_GENERATED_FEED (int): A feed mapping for a system generated feed cannot be operated on.
      MULTIPLE_MAPPINGS_FOR_PLACEHOLDER_TYPE (int): Only one feed mapping for a placeholder type is allowed per feed or
      customer (depending on the placeholder type).
      MULTIPLE_MAPPINGS_FOR_CRITERION_TYPE (int): Only one feed mapping for a criterion type is allowed per customer.
      MULTIPLE_MAPPINGS_FOR_PLACEHOLDER_FIELD (int): Only one feed attribute mapping for a placeholder field is allowed
      (depending on the placeholder type).
      MULTIPLE_MAPPINGS_FOR_CRITERION_FIELD (int): Only one feed attribute mapping for a criterion field is allowed
      (depending on the criterion type).
      UNEXPECTED_ATTRIBUTE_FIELD_MAPPINGS (int): This feed mapping may not contain any explicit attribute field mappings.
      LOCATION_PLACEHOLDER_ONLY_FOR_PLACES_FEEDS (int): Location placeholder feed mappings can only be created for Places feeds.
      CANNOT_MODIFY_MAPPINGS_FOR_TYPED_FEED (int): Mappings for typed feeds cannot be modified.
      INVALID_PLACEHOLDER_TYPE_FOR_NON_SYSTEM_GENERATED_FEED (int): The given placeholder type can only be mapped to system generated feeds.
      INVALID_PLACEHOLDER_TYPE_FOR_SYSTEM_GENERATED_FEED_TYPE (int): The given placeholder type cannot be mapped to a system generated feed
      with the given type.
      ATTRIBUTE_FIELD_MAPPING_MISSING_FIELD (int): The "field" oneof was not set in an AttributeFieldMapping.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_PLACEHOLDER_FIELD = 2
    INVALID_CRITERION_FIELD = 3
    INVALID_PLACEHOLDER_TYPE = 4
    INVALID_CRITERION_TYPE = 5
    NO_ATTRIBUTE_FIELD_MAPPINGS = 7
    FEED_ATTRIBUTE_TYPE_MISMATCH = 8
    CANNOT_OPERATE_ON_MAPPINGS_FOR_SYSTEM_GENERATED_FEED = 9
    MULTIPLE_MAPPINGS_FOR_PLACEHOLDER_TYPE = 10
    MULTIPLE_MAPPINGS_FOR_CRITERION_TYPE = 11
    MULTIPLE_MAPPINGS_FOR_PLACEHOLDER_FIELD = 12
    MULTIPLE_MAPPINGS_FOR_CRITERION_FIELD = 13
    UNEXPECTED_ATTRIBUTE_FIELD_MAPPINGS = 14
    LOCATION_PLACEHOLDER_ONLY_FOR_PLACES_FEEDS = 15
    CANNOT_MODIFY_MAPPINGS_FOR_TYPED_FEED = 16
    INVALID_PLACEHOLDER_TYPE_FOR_NON_SYSTEM_GENERATED_FEED = 17
    INVALID_PLACEHOLDER_TYPE_FOR_SYSTEM_GENERATED_FEED_TYPE = 18
    ATTRIBUTE_FIELD_MAPPING_MISSING_FIELD = 19
'''
FeedMappingErrorEnum = FeedMappingErrorEnum() # For __getattribute__
class FeedMappingStatusEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FeedMappingStatus IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    FeedMappingStatusEnum = '''\
class FeedMappingStatus(enum.IntEnum):
    """
    Possible statuses of a feed mapping.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ENABLED (int): Feed mapping is enabled.
      REMOVED (int): Feed mapping has been removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
FeedMappingStatusEnum = FeedMappingStatusEnum() # For __getattribute__
class FeedOriginEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FeedOrigin IntEnum; presumably exec'd lazily by
    # the _CreateEnumTypeUponFirstAccess base on first attribute access.
    FeedOriginEnum = '''\
class FeedOrigin(enum.IntEnum):
    """
    Possible values for a feed origin.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      USER (int): The FeedAttributes for this Feed are managed by the
      user. Users can add FeedAttributes to this Feed.
      GOOGLE (int): The FeedAttributes for an GOOGLE Feed are created by Google. A feed of
      this type is maintained by Google and will have the correct attributes
      for the placeholder type of the feed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    USER = 2
    GOOGLE = 3
'''
FeedOriginEnum = FeedOriginEnum() # For __getattribute__
class FeedStatusEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FeedStatus IntEnum; presumably exec'd lazily by
    # the _CreateEnumTypeUponFirstAccess base on first attribute access.
    FeedStatusEnum = '''\
class FeedStatus(enum.IntEnum):
    """
    Possible statuses of a feed.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ENABLED (int): Feed is enabled.
      REMOVED (int): Feed has been removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
FeedStatusEnum = FeedStatusEnum() # For __getattribute__
class FieldErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FieldError IntEnum; presumably exec'd lazily by
    # the _CreateEnumTypeUponFirstAccess base on first attribute access.
    FieldErrorEnum = '''\
class FieldError(enum.IntEnum):
    """
    Enum describing possible field errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      REQUIRED (int): The required field was not present.
      IMMUTABLE_FIELD (int): The field attempted to be mutated is immutable.
      INVALID_VALUE (int): The field's value is invalid.
      VALUE_MUST_BE_UNSET (int): The field cannot be set.
      REQUIRED_NONEMPTY_LIST (int): The required repeated field was empty.
      FIELD_CANNOT_BE_CLEARED (int): The field cannot be cleared.
      BLACKLISTED_VALUE (int): The field's value is on a blacklist for this field.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    REQUIRED = 2
    IMMUTABLE_FIELD = 3
    INVALID_VALUE = 4
    VALUE_MUST_BE_UNSET = 5
    REQUIRED_NONEMPTY_LIST = 6
    FIELD_CANNOT_BE_CLEARED = 7
    BLACKLISTED_VALUE = 8
'''
FieldErrorEnum = FieldErrorEnum() # For __getattribute__
class FieldMaskErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FieldMaskError IntEnum; presumably exec'd lazily
    # by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    # Member values are intentionally out of declaration order (from the API).
    FieldMaskErrorEnum = '''\
class FieldMaskError(enum.IntEnum):
    """
    Enum describing possible field mask errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      FIELD_MASK_MISSING (int): The field mask must be provided for update operations.
      FIELD_MASK_NOT_ALLOWED (int): The field mask must be empty for create and remove operations.
      FIELD_NOT_FOUND (int): The field mask contained an invalid field.
      FIELD_HAS_SUBFIELDS (int): The field mask updated a field with subfields. Fields with subfields may
      be cleared, but not updated. To fix this, the field mask should select
      all the subfields of the invalid field.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    FIELD_MASK_MISSING = 5
    FIELD_MASK_NOT_ALLOWED = 4
    FIELD_NOT_FOUND = 2
    FIELD_HAS_SUBFIELDS = 3
'''
FieldMaskErrorEnum = FieldMaskErrorEnum() # For __getattribute__
class FlightPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FlightPlaceholderField IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute
    # access. The backslash escapes (e.g. URL\_LIST) are reST markup from the
    # generated proto comments -- keep them verbatim.
    FlightPlaceholderFieldEnum = '''\
class FlightPlaceholderField(enum.IntEnum):
    """
    Possible values for Flight placeholder fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      DESTINATION_ID (int): Data Type: STRING. Required. Destination id. Example: PAR, LON.
      For feed items that only have destination id, destination id must be a
      unique key. For feed items that have both destination id and origin id,
      then the combination must be a unique key.
      ORIGIN_ID (int): Data Type: STRING. Origin id. Example: PAR, LON.
      Optional. Combination of destination id and origin id must be unique per
      offer.
      FLIGHT_DESCRIPTION (int): Data Type: STRING. Required. Main headline with product name to be shown
      in dynamic ad.
      ORIGIN_NAME (int): Data Type: STRING. Shorter names are recommended.
      DESTINATION_NAME (int): Data Type: STRING. Shorter names are recommended.
      FLIGHT_PRICE (int): Data Type: STRING. Price to be shown in the ad.
      Example: "100.00 USD"
      FORMATTED_PRICE (int): Data Type: STRING. Formatted price to be shown in the ad.
      Example: "Starting at $100.00 USD", "$80 - $100"
      FLIGHT_SALE_PRICE (int): Data Type: STRING. Sale price to be shown in the ad.
      Example: "80.00 USD"
      FORMATTED_SALE_PRICE (int): Data Type: STRING. Formatted sale price to be shown in the ad.
      Example: "On sale for $80.00", "$60 - $80"
      IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad.
      FINAL_URLS (int): Data Type: URL\_LIST. Required. Final URLs for the ad when using
      Upgraded URLs. User will be redirected to these URLs when they click on
      an ad, or when they click on a specific flight for ads that show
      multiple flights.
      FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
      URLs.
      TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
      ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
      android-app://{package\_id}/{scheme}/{host\_path}. The components are
      defined as follows: package\_id: app ID as specified in Google Play.
      scheme: the scheme to pass to the application. Can be HTTP, or a custom
      scheme. host\_path: identifies the specific content within your
      application.
      SIMILAR_DESTINATION_IDS (int): Data Type: STRING\_LIST. List of recommended destination IDs to show
      together with this item.
      IOS_APP_LINK (int): Data Type: STRING. iOS app link.
      IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DESTINATION_ID = 2
    ORIGIN_ID = 3
    FLIGHT_DESCRIPTION = 4
    ORIGIN_NAME = 5
    DESTINATION_NAME = 6
    FLIGHT_PRICE = 7
    FORMATTED_PRICE = 8
    FLIGHT_SALE_PRICE = 9
    FORMATTED_SALE_PRICE = 10
    IMAGE_URL = 11
    FINAL_URLS = 12
    FINAL_MOBILE_URLS = 13
    TRACKING_URL = 14
    ANDROID_APP_LINK = 15
    SIMILAR_DESTINATION_IDS = 16
    IOS_APP_LINK = 17
    IOS_APP_STORE_ID = 18
'''
FlightPlaceholderFieldEnum = FlightPlaceholderFieldEnum() # For __getattribute__
class FrequencyCapEventTypeEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FrequencyCapEventType IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    FrequencyCapEventTypeEnum = '''\
class FrequencyCapEventType(enum.IntEnum):
    """
    The type of event that the cap applies to (e.g. impression).
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      IMPRESSION (int): The cap applies on ad impressions.
      VIDEO_VIEW (int): The cap applies on video ad views.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    IMPRESSION = 2
    VIDEO_VIEW = 3
'''
FrequencyCapEventTypeEnum = FrequencyCapEventTypeEnum() # For __getattribute__
class FrequencyCapLevelEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FrequencyCapLevel IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    FrequencyCapLevelEnum = '''\
class FrequencyCapLevel(enum.IntEnum):
    """
    The level on which the cap is to be applied (e.g ad group ad, ad group).
    Cap is applied to all the resources of this level.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      AD_GROUP_AD (int): The cap is applied at the ad group ad level.
      AD_GROUP (int): The cap is applied at the ad group level.
      CAMPAIGN (int): The cap is applied at the campaign level.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    AD_GROUP_AD = 2
    AD_GROUP = 3
    CAMPAIGN = 4
'''
FrequencyCapLevelEnum = FrequencyCapLevelEnum() # For __getattribute__
class FrequencyCapTimeUnitEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FrequencyCapTimeUnit IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    FrequencyCapTimeUnitEnum = '''\
class FrequencyCapTimeUnit(enum.IntEnum):
    """
    Unit of time the cap is defined at (e.g. day, week).
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      DAY (int): The cap would define limit per one day.
      WEEK (int): The cap would define limit per one week.
      MONTH (int): The cap would define limit per one month.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DAY = 2
    WEEK = 3
    MONTH = 4
'''
FrequencyCapTimeUnitEnum = FrequencyCapTimeUnitEnum() # For __getattribute__
class FunctionErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FunctionError IntEnum; presumably exec'd lazily
    # by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    # Note: "Invalid numer of Operands" typo is in the generated API text --
    # left untouched because this string is runtime data, not a comment.
    FunctionErrorEnum = '''\
class FunctionError(enum.IntEnum):
    """
    Enum describing possible function errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      INVALID_FUNCTION_FORMAT (int): The format of the function is not recognized as a supported function
      format.
      DATA_TYPE_MISMATCH (int): Operand data types do not match.
      INVALID_CONJUNCTION_OPERANDS (int): The operands cannot be used together in a conjunction.
      INVALID_NUMBER_OF_OPERANDS (int): Invalid numer of Operands.
      INVALID_OPERAND_TYPE (int): Operand Type not supported.
      INVALID_OPERATOR (int): Operator not supported.
      INVALID_REQUEST_CONTEXT_TYPE (int): Request context type not supported.
      INVALID_FUNCTION_FOR_CALL_PLACEHOLDER (int): The matching function is not allowed for call placeholders
      INVALID_FUNCTION_FOR_PLACEHOLDER (int): The matching function is not allowed for the specified placeholder
      INVALID_OPERAND (int): Invalid operand.
      MISSING_CONSTANT_OPERAND_VALUE (int): Missing value for the constant operand.
      INVALID_CONSTANT_OPERAND_VALUE (int): The value of the constant operand is invalid.
      INVALID_NESTING (int): Invalid function nesting.
      MULTIPLE_FEED_IDS_NOT_SUPPORTED (int): The Feed ID was different from another Feed ID in the same function.
      INVALID_FUNCTION_FOR_FEED_WITH_FIXED_SCHEMA (int): The matching function is invalid for use with a feed with a fixed schema.
      INVALID_ATTRIBUTE_NAME (int): Invalid attribute name.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_FUNCTION_FORMAT = 2
    DATA_TYPE_MISMATCH = 3
    INVALID_CONJUNCTION_OPERANDS = 4
    INVALID_NUMBER_OF_OPERANDS = 5
    INVALID_OPERAND_TYPE = 6
    INVALID_OPERATOR = 7
    INVALID_REQUEST_CONTEXT_TYPE = 8
    INVALID_FUNCTION_FOR_CALL_PLACEHOLDER = 9
    INVALID_FUNCTION_FOR_PLACEHOLDER = 10
    INVALID_OPERAND = 11
    MISSING_CONSTANT_OPERAND_VALUE = 12
    INVALID_CONSTANT_OPERAND_VALUE = 13
    INVALID_NESTING = 14
    MULTIPLE_FEED_IDS_NOT_SUPPORTED = 15
    INVALID_FUNCTION_FOR_FEED_WITH_FIXED_SCHEMA = 16
    INVALID_ATTRIBUTE_NAME = 17
'''
FunctionErrorEnum = FunctionErrorEnum() # For __getattribute__
class FunctionParsingErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the FunctionParsingError IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    FunctionParsingErrorEnum = '''\
class FunctionParsingError(enum.IntEnum):
    """
    Enum describing possible function parsing errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      NO_MORE_INPUT (int): Unexpected end of function string.
      EXPECTED_CHARACTER (int): Could not find an expected character.
      UNEXPECTED_SEPARATOR (int): Unexpected separator character.
      UNMATCHED_LEFT_BRACKET (int): Unmatched left bracket or parenthesis.
      UNMATCHED_RIGHT_BRACKET (int): Unmatched right bracket or parenthesis.
      TOO_MANY_NESTED_FUNCTIONS (int): Functions are nested too deeply.
      MISSING_RIGHT_HAND_OPERAND (int): Missing right-hand-side operand.
      INVALID_OPERATOR_NAME (int): Invalid operator/function name.
      FEED_ATTRIBUTE_OPERAND_ARGUMENT_NOT_INTEGER (int): Feed attribute operand's argument is not an integer.
      NO_OPERANDS (int): Missing function operands.
      TOO_MANY_OPERANDS (int): Function had too many operands.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NO_MORE_INPUT = 2
    EXPECTED_CHARACTER = 3
    UNEXPECTED_SEPARATOR = 4
    UNMATCHED_LEFT_BRACKET = 5
    UNMATCHED_RIGHT_BRACKET = 6
    TOO_MANY_NESTED_FUNCTIONS = 7
    MISSING_RIGHT_HAND_OPERAND = 8
    INVALID_OPERATOR_NAME = 9
    FEED_ATTRIBUTE_OPERAND_ARGUMENT_NOT_INTEGER = 10
    NO_OPERANDS = 11
    TOO_MANY_OPERANDS = 12
'''
FunctionParsingErrorEnum = FunctionParsingErrorEnum() # For __getattribute__
class GenderTypeEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the GenderType IntEnum; presumably exec'd lazily by
    # the _CreateEnumTypeUponFirstAccess base on first attribute access.
    # Non-contiguous values (10, 11, 20) come from the generated API proto.
    GenderTypeEnum = '''\
class GenderType(enum.IntEnum):
    """
    The type of demographic genders (e.g. female).
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      MALE (int): Male.
      FEMALE (int): Female.
      UNDETERMINED (int): Undetermined gender.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    MALE = 10
    FEMALE = 11
    UNDETERMINED = 20
'''
GenderTypeEnum = GenderTypeEnum() # For __getattribute__
class GeoTargetConstantStatusEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the GeoTargetConstantStatus IntEnum; presumably
    # exec'd lazily by the _CreateEnumTypeUponFirstAccess base on first
    # attribute access.
    GeoTargetConstantStatusEnum = '''\
class GeoTargetConstantStatus(enum.IntEnum):
    """
    The possible statuses of a geo target constant.
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): The received value is not known in this version.
      This is a response-only value.
      ENABLED (int): The geo target constant is valid.
      REMOVAL_PLANNED (int): The geo target constant is obsolete and will be removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVAL_PLANNED = 3
'''
GeoTargetConstantStatusEnum = GeoTargetConstantStatusEnum() # For __getattribute__
class GeoTargetConstantSuggestionErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the GeoTargetConstantSuggestionError IntEnum;
    # presumably exec'd lazily by the _CreateEnumTypeUponFirstAccess base on
    # first attribute access.
    GeoTargetConstantSuggestionErrorEnum = '''\
class GeoTargetConstantSuggestionError(enum.IntEnum):
    """
    Enum describing possible geo target constant suggestion errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      LOCATION_NAME_SIZE_LIMIT (int): A location name cannot be greater than 300 characters.
      LOCATION_NAME_LIMIT (int): At most 25 location names can be specified in a SuggestGeoTargetConstants
      method.
      INVALID_COUNTRY_CODE (int): The country code is invalid.
      REQUEST_PARAMETERS_UNSET (int): Geo target constant resource names or location names must be provided in
      the request.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LOCATION_NAME_SIZE_LIMIT = 2
    LOCATION_NAME_LIMIT = 3
    INVALID_COUNTRY_CODE = 4
    REQUEST_PARAMETERS_UNSET = 5
'''
GeoTargetConstantSuggestionErrorEnum = GeoTargetConstantSuggestionErrorEnum() # For __getattribute__
class GeoTargetingRestrictionEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the GeoTargetingRestriction IntEnum; presumably
    # exec'd lazily by the _CreateEnumTypeUponFirstAccess base on first
    # attribute access.
    GeoTargetingRestrictionEnum = '''\
class GeoTargetingRestriction(enum.IntEnum):
    """
    A restriction used to determine if the request context's
    geo should be matched.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      LOCATION_OF_PRESENCE (int): Indicates that request context should match the physical location of
      the user.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LOCATION_OF_PRESENCE = 2
'''
GeoTargetingRestrictionEnum = GeoTargetingRestrictionEnum() # For __getattribute__
class GeoTargetingTypeEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the GeoTargetingType IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    GeoTargetingTypeEnum = '''\
class GeoTargetingType(enum.IntEnum):
    """
    The possible geo targeting types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      AREA_OF_INTEREST (int): Location the user is interested in while making the query.
      LOCATION_OF_PRESENCE (int): Location of the user issuing the query.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    AREA_OF_INTEREST = 2
    LOCATION_OF_PRESENCE = 3
'''
GeoTargetingTypeEnum = GeoTargetingTypeEnum() # For __getattribute__
class GoogleAdsFieldCategoryEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the GoogleAdsFieldCategory IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute
    # access. No member carries value 4 -- gap comes from the API proto.
    GoogleAdsFieldCategoryEnum = '''\
class GoogleAdsFieldCategory(enum.IntEnum):
    """
    The category of the artifact.
    Attributes:
      UNSPECIFIED (int): Unspecified
      UNKNOWN (int): Unknown
      RESOURCE (int): The described artifact is a resource.
      ATTRIBUTE (int): The described artifact is a field and is an attribute of a resource.
      Including a resource attribute field in a query may segment the query if
      the resource to which it is attributed segments the resource found in
      the FROM clause.
      SEGMENT (int): The described artifact is a field and always segments search queries.
      METRIC (int): The described artifact is a field and is a metric. It never segments
      search queries.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    RESOURCE = 2
    ATTRIBUTE = 3
    SEGMENT = 5
    METRIC = 6
'''
GoogleAdsFieldCategoryEnum = GoogleAdsFieldCategoryEnum() # For __getattribute__
class GoogleAdsFieldDataTypeEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the GoogleAdsFieldDataType IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute
    # access. Backslash escapes (e.g. type\_url) are reST markup from the
    # generated proto comments -- keep them verbatim.
    GoogleAdsFieldDataTypeEnum = '''\
class GoogleAdsFieldDataType(enum.IntEnum):
    """
    These are the various types a GoogleAdsService artifact may take on.
    Attributes:
      UNSPECIFIED (int): Unspecified
      UNKNOWN (int): Unknown
      BOOLEAN (int): Maps to google.protobuf.BoolValue
      Applicable operators: =, !=
      DATE (int): Maps to google.protobuf.StringValue. It can be compared using the set of
      operators specific to dates however.
      Applicable operators: =, <, >, <=, >=, BETWEEN, DURING, and IN
      DOUBLE (int): Maps to google.protobuf.DoubleValue
      Applicable operators: =, !=, <, >, IN, NOT IN
      ENUM (int): Maps to an enum. It's specific definition can be found at type\_url.
      Applicable operators: =, !=, IN, NOT IN
      FLOAT (int): Maps to google.protobuf.FloatValue
      Applicable operators: =, !=, <, >, IN, NOT IN
      INT32 (int): Maps to google.protobuf.Int32Value
      Applicable operators: =, !=, <, >, <=, >=, BETWEEN, IN, NOT IN
      INT64 (int): Maps to google.protobuf.Int64Value
      Applicable operators: =, !=, <, >, <=, >=, BETWEEN, IN, NOT IN
      MESSAGE (int): Maps to a protocol buffer message type. The data type's details can be
      found in type\_url.
      No operators work with MESSAGE fields.
      RESOURCE_NAME (int): Maps to google.protobuf.StringValue. Represents the resource name
      (unique id) of a resource or one of its foreign keys.
      No operators work with RESOURCE\_NAME fields.
      STRING (int): Maps to google.protobuf.StringValue.
      Applicable operators: =, !=, LIKE, NOT LIKE, IN, NOT IN
      UINT64 (int): Maps to google.protobuf.UInt64Value
      Applicable operators: =, !=, <, >, <=, >=, BETWEEN, IN, NOT IN
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BOOLEAN = 2
    DATE = 3
    DOUBLE = 4
    ENUM = 5
    FLOAT = 6
    INT32 = 7
    INT64 = 8
    MESSAGE = 9
    RESOURCE_NAME = 10
    STRING = 11
    UINT64 = 12
'''
GoogleAdsFieldDataTypeEnum = GoogleAdsFieldDataTypeEnum() # For __getattribute__
class HeaderErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the HeaderError IntEnum; presumably exec'd lazily by
    # the _CreateEnumTypeUponFirstAccess base on first attribute access.
    # Value 2 is absent -- gap comes from the generated API proto.
    HeaderErrorEnum = '''\
class HeaderError(enum.IntEnum):
    """
    Enum describing possible header errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      INVALID_LOGIN_CUSTOMER_ID (int): The login customer id could not be validated.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_LOGIN_CUSTOMER_ID = 3
'''
HeaderErrorEnum = HeaderErrorEnum() # For __getattribute__
class HotelDateSelectionTypeEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the HotelDateSelectionType IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute
    # access. Values 50/51 come straight from the generated API proto.
    HotelDateSelectionTypeEnum = '''\
class HotelDateSelectionType(enum.IntEnum):
    """
    Enum describing possible hotel date selection types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      DEFAULT_SELECTION (int): Dates selected by default.
      USER_SELECTED (int): Dates selected by the user.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DEFAULT_SELECTION = 50
    USER_SELECTED = 51
'''
HotelDateSelectionTypeEnum = HotelDateSelectionTypeEnum() # For __getattribute__
class HotelPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the HotelPlaceholderField IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute
    # access. Backslash escapes (e.g. STRING\_LIST) are reST markup from the
    # generated proto comments -- keep them verbatim.
    HotelPlaceholderFieldEnum = '''\
class HotelPlaceholderField(enum.IntEnum):
    """
    Possible values for Hotel placeholder fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PROPERTY_ID (int): Data Type: STRING. Required. Unique ID.
      PROPERTY_NAME (int): Data Type: STRING. Required. Main headline with property name to be shown
      in dynamic ad.
      DESTINATION_NAME (int): Data Type: STRING. Name of destination to be shown in dynamic ad.
      DESCRIPTION (int): Data Type: STRING. Description of destination to be shown in dynamic ad.
      ADDRESS (int): Data Type: STRING. Complete property address, including postal code.
      PRICE (int): Data Type: STRING. Price to be shown in the ad.
      Example: "100.00 USD"
      FORMATTED_PRICE (int): Data Type: STRING. Formatted price to be shown in the ad.
      Example: "Starting at $100.00 USD", "$80 - $100"
      SALE_PRICE (int): Data Type: STRING. Sale price to be shown in the ad.
      Example: "80.00 USD"
      FORMATTED_SALE_PRICE (int): Data Type: STRING. Formatted sale price to be shown in the ad.
      Example: "On sale for $80.00", "$60 - $80"
      IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad.
      CATEGORY (int): Data Type: STRING. Category of property used to group like items together
      for recommendation engine.
      STAR_RATING (int): Data Type: INT64. Star rating (1 to 5) used to group like items
      together for recommendation engine.
      CONTEXTUAL_KEYWORDS (int): Data Type: STRING\_LIST. Keywords used for product retrieval.
      FINAL_URLS (int): Data Type: URL\_LIST. Required. Final URLs for the ad when using
      Upgraded URLs. User will be redirected to these URLs when they click on
      an ad, or when they click on a specific flight for ads that show
      multiple flights.
      FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
      URLs.
      TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
      ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
      android-app://{package\_id}/{scheme}/{host\_path}. The components are
      defined as follows: package\_id: app ID as specified in Google Play.
      scheme: the scheme to pass to the application. Can be HTTP, or a custom
      scheme. host\_path: identifies the specific content within your
      application.
      SIMILAR_PROPERTY_IDS (int): Data Type: STRING\_LIST. List of recommended property IDs to show
      together with this item.
      IOS_APP_LINK (int): Data Type: STRING. iOS app link.
      IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PROPERTY_ID = 2
    PROPERTY_NAME = 3
    DESTINATION_NAME = 4
    DESCRIPTION = 5
    ADDRESS = 6
    PRICE = 7
    FORMATTED_PRICE = 8
    SALE_PRICE = 9
    FORMATTED_SALE_PRICE = 10
    IMAGE_URL = 11
    CATEGORY = 12
    STAR_RATING = 13
    CONTEXTUAL_KEYWORDS = 14
    FINAL_URLS = 15
    FINAL_MOBILE_URLS = 16
    TRACKING_URL = 17
    ANDROID_APP_LINK = 18
    SIMILAR_PROPERTY_IDS = 19
    IOS_APP_LINK = 20
    IOS_APP_STORE_ID = 21
'''
HotelPlaceholderFieldEnum = HotelPlaceholderFieldEnum() # For __getattribute__
class HotelPriceBucketEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the HotelPriceBucket IntEnum; presumably exec'd
    # lazily by the _CreateEnumTypeUponFirstAccess base on first attribute
    # access. Value 2 is absent -- gap comes from the generated API proto.
    HotelPriceBucketEnum = '''\
class HotelPriceBucket(enum.IntEnum):
    """
    Enum describing possible hotel price buckets.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      LOWEST_TIED (int): Tied for lowest price. Partner is within a small variance of the lowest
      price.
      NOT_LOWEST (int): Not lowest price. Partner is not within a small variance of the lowest
      price.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LOWEST_TIED = 3
    NOT_LOWEST = 4
'''
HotelPriceBucketEnum = HotelPriceBucketEnum() # For __getattribute__
class HotelRateTypeEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the HotelRateType IntEnum; presumably exec'd lazily
    # by the _CreateEnumTypeUponFirstAccess base on first attribute access.
    HotelRateTypeEnum = '''\
class HotelRateType(enum.IntEnum):
    """
    Enum describing possible hotel rate types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      UNAVAILABLE (int): Rate type information is unavailable.
      PUBLIC_RATE (int): Rates available to everyone.
      QUALIFIED_RATE (int): A membership program rate is available and satisfies basic requirements
      like having a public rate available. UI treatment will strikethrough the
      public rate and indicate that a discount is available to the user. For
      more on Qualified Rates, visit
      https://developers.google.com/hotels/hotel-ads/dev-guide/qualified-rates
      PRIVATE_RATE (int): Rates available to users that satisfy some eligibility criteria. e.g.
      all signed-in users, 20% of mobile users, all mobile users in Canada,
      etc.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    UNAVAILABLE = 2
    PUBLIC_RATE = 3
    QUALIFIED_RATE = 4
    PRIVATE_RATE = 5
'''
HotelRateTypeEnum = HotelRateTypeEnum() # For __getattribute__
class IdErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the IdError IntEnum; presumably exec'd lazily by the
    # _CreateEnumTypeUponFirstAccess base on first attribute access.
    IdErrorEnum = '''\
class IdError(enum.IntEnum):
    """
    Enum describing possible id errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      NOT_FOUND (int): Id not found
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NOT_FOUND = 2
'''
IdErrorEnum = IdErrorEnum() # For __getattribute__
class ImageErrorEnum(_CreateEnumTypeUponFirstAccess):
    # String source for the ImageError IntEnum; presumably exec'd lazily by
    # the _CreateEnumTypeUponFirstAccess base on first attribute access.
    # Backslash escapes (e.g. '\_self') are reST markup from the generated
    # proto comments -- keep them verbatim.
    ImageErrorEnum = '''\
class ImageError(enum.IntEnum):
    """
    Enum describing possible image errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      INVALID_IMAGE (int): The image is not valid.
      STORAGE_ERROR (int): The image could not be stored.
      BAD_REQUEST (int): There was a problem with the request.
      UNEXPECTED_SIZE (int): The image is not of legal dimensions.
      ANIMATED_NOT_ALLOWED (int): Animated image are not permitted.
      ANIMATION_TOO_LONG (int): Animation is too long.
      SERVER_ERROR (int): There was an error on the server.
      CMYK_JPEG_NOT_ALLOWED (int): Image cannot be in CMYK color format.
      FLASH_NOT_ALLOWED (int): Flash images are not permitted.
      FLASH_WITHOUT_CLICKTAG (int): Flash images must support clickTag.
      FLASH_ERROR_AFTER_FIXING_CLICK_TAG (int): A flash error has occurred after fixing the click tag.
      ANIMATED_VISUAL_EFFECT (int): Unacceptable visual effects.
      FLASH_ERROR (int): There was a problem with the flash image.
      LAYOUT_PROBLEM (int): Incorrect image layout.
      PROBLEM_READING_IMAGE_FILE (int): There was a problem reading the image file.
      ERROR_STORING_IMAGE (int): There was an error storing the image.
      ASPECT_RATIO_NOT_ALLOWED (int): The aspect ratio of the image is not allowed.
      FLASH_HAS_NETWORK_OBJECTS (int): Flash cannot have network objects.
      FLASH_HAS_NETWORK_METHODS (int): Flash cannot have network methods.
      FLASH_HAS_URL (int): Flash cannot have a Url.
      FLASH_HAS_MOUSE_TRACKING (int): Flash cannot use mouse tracking.
      FLASH_HAS_RANDOM_NUM (int): Flash cannot have a random number.
      FLASH_SELF_TARGETS (int): Ad click target cannot be '\_self'.
      FLASH_BAD_GETURL_TARGET (int): GetUrl method should only use '\_blank'.
      FLASH_VERSION_NOT_SUPPORTED (int): Flash version is not supported.
      FLASH_WITHOUT_HARD_CODED_CLICK_URL (int): Flash movies need to have hard coded click URL or clickTAG
      INVALID_FLASH_FILE (int): Uploaded flash file is corrupted.
      FAILED_TO_FIX_CLICK_TAG_IN_FLASH (int): Uploaded flash file can be parsed, but the click tag can not be fixed
      properly.
      FLASH_ACCESSES_NETWORK_RESOURCES (int): Flash movie accesses network resources
      FLASH_EXTERNAL_JS_CALL (int): Flash movie attempts to call external javascript code
      FLASH_EXTERNAL_FS_CALL (int): Flash movie attempts to call flash system commands
      FILE_TOO_LARGE (int): Image file is too large.
      IMAGE_DATA_TOO_LARGE (int): Image data is too large.
      IMAGE_PROCESSING_ERROR (int): Error while processing the image.
      IMAGE_TOO_SMALL (int): Image is too small.
      INVALID_INPUT (int): Input was invalid.
      PROBLEM_READING_FILE (int): There was a problem reading the image file.
      IMAGE_CONSTRAINTS_VIOLATED (int): Image constraints are violated, but details like
      ASPECT\_RATIO\_NOT\_ALLOWED can't be provided. This happens when asset
      spec contains more than one constraint and different criteria of
      different constraints are violated.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_IMAGE = 2
    STORAGE_ERROR = 3
    BAD_REQUEST = 4
    UNEXPECTED_SIZE = 5
    ANIMATED_NOT_ALLOWED = 6
    ANIMATION_TOO_LONG = 7
    SERVER_ERROR = 8
    CMYK_JPEG_NOT_ALLOWED = 9
    FLASH_NOT_ALLOWED = 10
    FLASH_WITHOUT_CLICKTAG = 11
    FLASH_ERROR_AFTER_FIXING_CLICK_TAG = 12
    ANIMATED_VISUAL_EFFECT = 13
    FLASH_ERROR = 14
    LAYOUT_PROBLEM = 15
    PROBLEM_READING_IMAGE_FILE = 16
    ERROR_STORING_IMAGE = 17
    ASPECT_RATIO_NOT_ALLOWED = 18
    FLASH_HAS_NETWORK_OBJECTS = 19
    FLASH_HAS_NETWORK_METHODS = 20
    FLASH_HAS_URL = 21
    FLASH_HAS_MOUSE_TRACKING = 22
    FLASH_HAS_RANDOM_NUM = 23
    FLASH_SELF_TARGETS = 24
    FLASH_BAD_GETURL_TARGET = 25
    FLASH_VERSION_NOT_SUPPORTED = 26
    FLASH_WITHOUT_HARD_CODED_CLICK_URL = 27
    INVALID_FLASH_FILE = 28
    FAILED_TO_FIX_CLICK_TAG_IN_FLASH = 29
    FLASH_ACCESSES_NETWORK_RESOURCES = 30
    FLASH_EXTERNAL_JS_CALL = 31
    FLASH_EXTERNAL_FS_CALL = 32
    FILE_TOO_LARGE = 33
    IMAGE_DATA_TOO_LARGE = 34
    IMAGE_PROCESSING_ERROR = 35
    IMAGE_TOO_SMALL = 36
    INVALID_INPUT = 37
    PROBLEM_READING_FILE = 38
    IMAGE_CONSTRAINTS_VIOLATED = 39
'''
ImageErrorEnum = ImageErrorEnum() # For __getattribute__
class IncomeRangeTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    IncomeRangeTypeEnum = '''\
class IncomeRangeType(enum.IntEnum):
    """
    The type of demographic income ranges (e.g. between 0% to 50%).
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        INCOME_RANGE_0_50 (int): 0%-50%.
        INCOME_RANGE_50_60 (int): 50% to 60%.
        INCOME_RANGE_60_70 (int): 60% to 70%.
        INCOME_RANGE_70_80 (int): 70% to 80%.
        INCOME_RANGE_80_90 (int): 80% to 90%.
        INCOME_RANGE_90_UP (int): Greater than 90%.
        INCOME_RANGE_UNDETERMINED (int): Undetermined income range.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INCOME_RANGE_0_50 = 510001
    INCOME_RANGE_50_60 = 510002
    INCOME_RANGE_60_70 = 510003
    INCOME_RANGE_70_80 = 510004
    INCOME_RANGE_80_90 = 510005
    INCOME_RANGE_90_UP = 510006
    INCOME_RANGE_UNDETERMINED = 510000
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
IncomeRangeTypeEnum = IncomeRangeTypeEnum() # For __getattribute__
class InteractionEventTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    InteractionEventTypeEnum = '''\
class InteractionEventType(enum.IntEnum):
    """
    Enum describing possible types of payable and free interactions.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        CLICK (int): Click to site. In most cases, this interaction navigates to an external
            location, usually the advertiser's landing page. This is also the default
            InteractionEventType for click events.
        ENGAGEMENT (int): The user's expressed intent to engage with the ad in-place.
        VIDEO_VIEW (int): User viewed a video ad.
        NONE (int): The default InteractionEventType for ad conversion events.
            This is used when an ad conversion row does NOT indicate
            that the free interactions (i.e., the ad conversions)
            should be 'promoted' and reported as part of the core metrics.
            These are simply other (ad) conversions.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CLICK = 2
    ENGAGEMENT = 3
    VIDEO_VIEW = 4
    NONE = 5
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
InteractionEventTypeEnum = InteractionEventTypeEnum() # For __getattribute__
class InteractionTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    InteractionTypeEnum = '''\
class InteractionType(enum.IntEnum):
    """
    Enum describing possible interaction types.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        CALLS (int): Calls.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CALLS = 8000
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
InteractionTypeEnum = InteractionTypeEnum() # For __getattribute__
class InternalErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    InternalErrorEnum = '''\
class InternalError(enum.IntEnum):
    """
    Enum describing possible internal errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        INTERNAL_ERROR (int): Google Ads API encountered unexpected internal error.
        ERROR_CODE_NOT_PUBLISHED (int): The intended error code doesn't exist in specified API version. It will
            be released in a future API version.
        TRANSIENT_ERROR (int): Google Ads API encountered an unexpected transient error. The user
            should retry their request in these cases.
        DEADLINE_EXCEEDED (int): The request took longer than a deadline.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INTERNAL_ERROR = 2
    ERROR_CODE_NOT_PUBLISHED = 3
    TRANSIENT_ERROR = 4
    DEADLINE_EXCEEDED = 5
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
InternalErrorEnum = InternalErrorEnum() # For __getattribute__
class InvoiceErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    InvoiceErrorEnum = '''\
class InvoiceError(enum.IntEnum):
    """
    Enum describing possible invoice errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        YEAR_MONTH_TOO_OLD (int): Cannot request invoices issued before 2019-01-01.
        NOT_INVOICED_CUSTOMER (int): Cannot request invoices for customer who doesn't receive invoices.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    YEAR_MONTH_TOO_OLD = 2
    NOT_INVOICED_CUSTOMER = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
InvoiceErrorEnum = InvoiceErrorEnum() # For __getattribute__
class InvoiceTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    InvoiceTypeEnum = '''\
class InvoiceType(enum.IntEnum):
    """
    The possible type of invoices.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        CREDIT_MEMO (int): An invoice with a negative amount. The account receives a credit.
        INVOICE (int): An invoice with a positive amount. The account owes a balance.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CREDIT_MEMO = 2
    INVOICE = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
InvoiceTypeEnum = InvoiceTypeEnum() # For __getattribute__
class JobPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    # Fix: the JOB_ID description contained a garbled duplicated fragment
    # ("must be unique. ID) pair must be unique.") — the stray sentence
    # fragment has been removed; numeric values are untouched.
    JobPlaceholderFieldEnum = '''\
class JobPlaceholderField(enum.IntEnum):
    """
    Possible values for Job placeholder fields.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        JOB_ID (int): Data Type: STRING. Required. If only JOB\_ID is specified, then it must
            be unique. If both JOB\_ID and LOCATION\_ID are specified, then the pair
            must be unique.
        LOCATION_ID (int): Data Type: STRING. Combination of JOB\_ID and LOCATION\_ID must be
            unique per offer.
        TITLE (int): Data Type: STRING. Required. Main headline with job title to be shown in
            dynamic ad.
        SUBTITLE (int): Data Type: STRING. Job subtitle to be shown in dynamic ad.
        DESCRIPTION (int): Data Type: STRING. Description of job to be shown in dynamic ad.
        IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad. Highly recommended for
            image ads.
        CATEGORY (int): Data Type: STRING. Category of property used to group like items together
            for recommendation engine.
        CONTEXTUAL_KEYWORDS (int): Data Type: STRING\_LIST. Keywords used for product retrieval.
        ADDRESS (int): Data Type: STRING. Complete property address, including postal code.
        SALARY (int): Data Type: STRING. Salary or salary range of job to be shown in dynamic
            ad.
        FINAL_URLS (int): Data Type: URL\_LIST. Required. Final URLs to be used in ad when using
            Upgraded URLs; the more specific the better (e.g. the individual URL of
            a specific job and its location).
        FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
            URLs.
        TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
        ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
            android-app://{package\_id}/{scheme}/{host\_path}. The components are
            defined as follows: package\_id: app ID as specified in Google Play.
            scheme: the scheme to pass to the application. Can be HTTP, or a custom
            scheme. host\_path: identifies the specific content within your
            application.
        SIMILAR_JOB_IDS (int): Data Type: STRING\_LIST. List of recommended job IDs to show together
            with this item.
        IOS_APP_LINK (int): Data Type: STRING. iOS app link.
        IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    JOB_ID = 2
    LOCATION_ID = 3
    TITLE = 4
    SUBTITLE = 5
    DESCRIPTION = 6
    IMAGE_URL = 7
    CATEGORY = 8
    CONTEXTUAL_KEYWORDS = 9
    ADDRESS = 10
    SALARY = 11
    FINAL_URLS = 12
    FINAL_MOBILE_URLS = 14
    TRACKING_URL = 15
    ANDROID_APP_LINK = 16
    SIMILAR_JOB_IDS = 17
    IOS_APP_LINK = 18
    IOS_APP_STORE_ID = 19
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
JobPlaceholderFieldEnum = JobPlaceholderFieldEnum() # For __getattribute__
class KeywordMatchTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    KeywordMatchTypeEnum = '''\
class KeywordMatchType(enum.IntEnum):
    """
    Possible Keyword match types.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        EXACT (int): Exact match.
        PHRASE (int): Phrase match.
        BROAD (int): Broad match.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    EXACT = 2
    PHRASE = 3
    BROAD = 4
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordMatchTypeEnum = KeywordMatchTypeEnum() # For __getattribute__
class KeywordPlanAdGroupErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    KeywordPlanAdGroupErrorEnum = '''\
class KeywordPlanAdGroupError(enum.IntEnum):
    """
    Enum describing possible errors from applying a keyword plan ad group.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        INVALID_NAME (int): The keyword plan ad group name is missing, empty, longer than allowed
            limit or contains invalid chars.
        DUPLICATE_NAME (int): The keyword plan ad group name is duplicate to an existing keyword plan
            AdGroup name or other keyword plan AdGroup name in the request.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_NAME = 2
    DUPLICATE_NAME = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanAdGroupErrorEnum = KeywordPlanAdGroupErrorEnum() # For __getattribute__
class KeywordPlanCampaignErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    KeywordPlanCampaignErrorEnum = '''\
class KeywordPlanCampaignError(enum.IntEnum):
    """
    Enum describing possible errors from applying a keyword plan campaign.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        INVALID_NAME (int): A keyword plan campaign name is missing, empty, longer than allowed limit
            or contains invalid chars.
        INVALID_LANGUAGES (int): A keyword plan campaign contains one or more untargetable languages.
        INVALID_GEOS (int): A keyword plan campaign contains one or more invalid geo targets.
        DUPLICATE_NAME (int): The keyword plan campaign name is duplicate to an existing keyword plan
            campaign name or other keyword plan campaign name in the request.
        MAX_GEOS_EXCEEDED (int): The number of geo targets in the keyword plan campaign exceeds limits.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_NAME = 2
    INVALID_LANGUAGES = 3
    INVALID_GEOS = 4
    DUPLICATE_NAME = 5
    MAX_GEOS_EXCEEDED = 6
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanCampaignErrorEnum = KeywordPlanCampaignErrorEnum() # For __getattribute__
class KeywordPlanCompetitionLevelEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    KeywordPlanCompetitionLevelEnum = '''\
class KeywordPlanCompetitionLevel(enum.IntEnum):
    """
    Competition level of a keyword.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): The value is unknown in this version.
        LOW (int): Low competition.
        MEDIUM (int): Medium competition.
        HIGH (int): High competition.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LOW = 2
    MEDIUM = 3
    HIGH = 4
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanCompetitionLevelEnum = KeywordPlanCompetitionLevelEnum() # For __getattribute__
class KeywordPlanErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    # NOTE(review): value 12 is intentionally absent (MISSING_BID jumps to 13),
    # mirroring the upstream proto numbering.
    KeywordPlanErrorEnum = '''\
class KeywordPlanError(enum.IntEnum):
    """
    Enum describing possible errors from applying a keyword plan.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        BID_MULTIPLIER_OUT_OF_RANGE (int): The plan's bid multiplier value is outside the valid range.
        BID_TOO_HIGH (int): The plan's bid value is too high.
        BID_TOO_LOW (int): The plan's bid value is too low.
        BID_TOO_MANY_FRACTIONAL_DIGITS (int): The plan's cpc bid is not a multiple of the minimum billable unit.
        DAILY_BUDGET_TOO_LOW (int): The plan's daily budget value is too low.
        DAILY_BUDGET_TOO_MANY_FRACTIONAL_DIGITS (int): The plan's daily budget is not a multiple of the minimum billable unit.
        INVALID_VALUE (int): The input has an invalid value.
        KEYWORD_PLAN_HAS_NO_KEYWORDS (int): The plan has no keyword.
        KEYWORD_PLAN_NOT_ENABLED (int): The plan is not enabled and API cannot provide mutation, forecast or
            stats.
        KEYWORD_PLAN_NOT_FOUND (int): The requested plan cannot be found for providing forecast or stats.
        MISSING_BID (int): The plan is missing a cpc bid.
        MISSING_FORECAST_PERIOD (int): The plan is missing required forecast\_period field.
        INVALID_FORECAST_DATE_RANGE (int): The plan's forecast\_period has invalid forecast date range.
        INVALID_NAME (int): The plan's name is invalid.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BID_MULTIPLIER_OUT_OF_RANGE = 2
    BID_TOO_HIGH = 3
    BID_TOO_LOW = 4
    BID_TOO_MANY_FRACTIONAL_DIGITS = 5
    DAILY_BUDGET_TOO_LOW = 6
    DAILY_BUDGET_TOO_MANY_FRACTIONAL_DIGITS = 7
    INVALID_VALUE = 8
    KEYWORD_PLAN_HAS_NO_KEYWORDS = 9
    KEYWORD_PLAN_NOT_ENABLED = 10
    KEYWORD_PLAN_NOT_FOUND = 11
    MISSING_BID = 13
    MISSING_FORECAST_PERIOD = 14
    INVALID_FORECAST_DATE_RANGE = 15
    INVALID_NAME = 16
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanErrorEnum = KeywordPlanErrorEnum() # For __getattribute__
class KeywordPlanForecastIntervalEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    # NOTE(review): value 2 is intentionally absent (NEXT_WEEK starts at 3),
    # mirroring the upstream proto numbering.
    KeywordPlanForecastIntervalEnum = '''\
class KeywordPlanForecastInterval(enum.IntEnum):
    """
    Forecast intervals.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): The value is unknown in this version.
        NEXT_WEEK (int): The next week date range for keyword plan. The next week is based
            on the default locale of the user's account and is mostly SUN-SAT or
            MON-SUN.
            This can be different from next-7 days.
        NEXT_MONTH (int): The next month date range for keyword plan.
        NEXT_QUARTER (int): The next quarter date range for keyword plan.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NEXT_WEEK = 3
    NEXT_MONTH = 4
    NEXT_QUARTER = 5
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanForecastIntervalEnum = KeywordPlanForecastIntervalEnum() # For __getattribute__
class KeywordPlanIdeaErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    KeywordPlanIdeaErrorEnum = '''\
class KeywordPlanIdeaError(enum.IntEnum):
    """
    Enum describing possible errors from KeywordPlanIdeaService.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        URL_CRAWL_ERROR (int): Error when crawling the input URL.
        INVALID_VALUE (int): The input has an invalid value.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    URL_CRAWL_ERROR = 2
    INVALID_VALUE = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanIdeaErrorEnum = KeywordPlanIdeaErrorEnum() # For __getattribute__
class KeywordPlanKeywordErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    KeywordPlanKeywordErrorEnum = '''\
class KeywordPlanKeywordError(enum.IntEnum):
    """
    Enum describing possible errors from applying a keyword plan keyword.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        INVALID_KEYWORD_MATCH_TYPE (int): A keyword or negative keyword has invalid match type.
        DUPLICATE_KEYWORD (int): A keyword or negative keyword with same text and match type already
            exists.
        KEYWORD_TEXT_TOO_LONG (int): Keyword or negative keyword text exceeds the allowed limit.
        KEYWORD_HAS_INVALID_CHARS (int): Keyword or negative keyword text has invalid characters or symbols.
        KEYWORD_HAS_TOO_MANY_WORDS (int): Keyword or negative keyword text has too many words.
        INVALID_KEYWORD_TEXT (int): Keyword or negative keyword has invalid text.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_KEYWORD_MATCH_TYPE = 2
    DUPLICATE_KEYWORD = 3
    KEYWORD_TEXT_TOO_LONG = 4
    KEYWORD_HAS_INVALID_CHARS = 5
    KEYWORD_HAS_TOO_MANY_WORDS = 6
    INVALID_KEYWORD_TEXT = 7
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanKeywordErrorEnum = KeywordPlanKeywordErrorEnum() # For __getattribute__
class KeywordPlanNegativeKeywordErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    # No service-specific error codes exist for this type yet; only the two
    # standard sentinel values are defined.
    KeywordPlanNegativeKeywordErrorEnum = '''\
class KeywordPlanNegativeKeywordError(enum.IntEnum):
    """
    Enum describing possible errors from applying a keyword plan negative
    keyword.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanNegativeKeywordErrorEnum = KeywordPlanNegativeKeywordErrorEnum() # For __getattribute__
class KeywordPlanNetworkEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    KeywordPlanNetworkEnum = '''\
class KeywordPlanNetwork(enum.IntEnum):
    """
    Enumerates keyword plan forecastable network types.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): The value is unknown in this version.
        GOOGLE_SEARCH (int): Google Search.
        GOOGLE_SEARCH_AND_PARTNERS (int): Google Search + Search partners.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    GOOGLE_SEARCH = 2
    GOOGLE_SEARCH_AND_PARTNERS = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
KeywordPlanNetworkEnum = KeywordPlanNetworkEnum() # For __getattribute__
class LabelErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    LabelErrorEnum = '''\
class LabelError(enum.IntEnum):
    """
    Enum describing possible label errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        CANNOT_APPLY_INACTIVE_LABEL (int): An inactive label cannot be applied.
        CANNOT_APPLY_LABEL_TO_DISABLED_AD_GROUP_CRITERION (int): A label cannot be applied to a disabled ad group criterion.
        CANNOT_APPLY_LABEL_TO_NEGATIVE_AD_GROUP_CRITERION (int): A label cannot be applied to a negative ad group criterion.
        EXCEEDED_LABEL_LIMIT_PER_TYPE (int): Cannot apply more than 50 labels per resource.
        INVALID_RESOURCE_FOR_MANAGER_LABEL (int): Labels from a manager account cannot be applied to campaign, ad group,
            ad group ad, or ad group criterion resources.
        DUPLICATE_NAME (int): Label names must be unique.
        INVALID_LABEL_NAME (int): Label names cannot be empty.
        CANNOT_ATTACH_LABEL_TO_DRAFT (int): Labels cannot be applied to a draft.
        CANNOT_ATTACH_NON_MANAGER_LABEL_TO_CUSTOMER (int): Labels not from a manager account cannot be applied to the customer
            resource.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CANNOT_APPLY_INACTIVE_LABEL = 2
    CANNOT_APPLY_LABEL_TO_DISABLED_AD_GROUP_CRITERION = 3
    CANNOT_APPLY_LABEL_TO_NEGATIVE_AD_GROUP_CRITERION = 4
    EXCEEDED_LABEL_LIMIT_PER_TYPE = 5
    INVALID_RESOURCE_FOR_MANAGER_LABEL = 6
    DUPLICATE_NAME = 7
    INVALID_LABEL_NAME = 8
    CANNOT_ATTACH_LABEL_TO_DRAFT = 9
    CANNOT_ATTACH_NON_MANAGER_LABEL_TO_CUSTOMER = 10
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
LabelErrorEnum = LabelErrorEnum() # For __getattribute__
class LabelStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    LabelStatusEnum = '''\
class LabelStatus(enum.IntEnum):
    """
    Possible statuses of a label.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        ENABLED (int): Label is enabled.
        REMOVED (int): Label is removed.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ENABLED = 2
    REMOVED = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
LabelStatusEnum = LabelStatusEnum() # For __getattribute__
class LanguageCodeErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    LanguageCodeErrorEnum = '''\
class LanguageCodeError(enum.IntEnum):
    """
    Enum describing language code errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        LANGUAGE_CODE_NOT_FOUND (int): The input language code is not recognized.
        INVALID_LANGUAGE_CODE (int): The language is not allowed to use.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LANGUAGE_CODE_NOT_FOUND = 2
    INVALID_LANGUAGE_CODE = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
LanguageCodeErrorEnum = LanguageCodeErrorEnum() # For __getattribute__
class LegacyAppInstallAdAppStoreEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    LegacyAppInstallAdAppStoreEnum = '''\
class LegacyAppInstallAdAppStore(enum.IntEnum):
    """
    App store type in a legacy app install ad.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        APPLE_APP_STORE (int): Apple iTunes.
        GOOGLE_PLAY (int): Google Play.
        WINDOWS_STORE (int): Windows Store.
        WINDOWS_PHONE_STORE (int): Windows Phone Store.
        CN_APP_STORE (int): The app is hosted in a Chinese app store.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    APPLE_APP_STORE = 2
    GOOGLE_PLAY = 3
    WINDOWS_STORE = 4
    WINDOWS_PHONE_STORE = 5
    CN_APP_STORE = 6
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
LegacyAppInstallAdAppStoreEnum = LegacyAppInstallAdAppStoreEnum() # For __getattribute__
class ListOperationErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    # NOTE(review): values 2-6 are intentionally absent (codes start at 7),
    # mirroring the upstream proto numbering.
    ListOperationErrorEnum = '''\
class ListOperationError(enum.IntEnum):
    """
    Enum describing possible list operation errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        REQUIRED_FIELD_MISSING (int): Field required in value is missing.
        DUPLICATE_VALUES (int): Duplicate or identical value is sent in multiple list operations.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    REQUIRED_FIELD_MISSING = 7
    DUPLICATE_VALUES = 8
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
ListOperationErrorEnum = ListOperationErrorEnum() # For __getattribute__
class ListingGroupTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    ListingGroupTypeEnum = '''\
class ListingGroupType(enum.IntEnum):
    """
    The type of the listing group.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        SUBDIVISION (int): Subdivision of products along some listing dimension. These nodes
            are not used by serving to target listing entries, but is purely
            to define the structure of the tree.
        UNIT (int): Listing group unit that defines a bid.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    SUBDIVISION = 2
    UNIT = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
ListingGroupTypeEnum = ListingGroupTypeEnum() # For __getattribute__
class LocalPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    LocalPlaceholderFieldEnum = '''\
class LocalPlaceholderField(enum.IntEnum):
    """
    Possible values for Local placeholder fields.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        DEAL_ID (int): Data Type: STRING. Required. Unique ID.
        DEAL_NAME (int): Data Type: STRING. Required. Main headline with local deal title to be
            shown in dynamic ad.
        SUBTITLE (int): Data Type: STRING. Local deal subtitle to be shown in dynamic ad.
        DESCRIPTION (int): Data Type: STRING. Description of local deal to be shown in dynamic ad.
        PRICE (int): Data Type: STRING. Price to be shown in the ad. Highly recommended for
            dynamic ads. Example: "100.00 USD"
        FORMATTED_PRICE (int): Data Type: STRING. Formatted price to be shown in the ad.
            Example: "Starting at $100.00 USD", "$80 - $100"
        SALE_PRICE (int): Data Type: STRING. Sale price to be shown in the ad.
            Example: "80.00 USD"
        FORMATTED_SALE_PRICE (int): Data Type: STRING. Formatted sale price to be shown in the ad.
            Example: "On sale for $80.00", "$60 - $80"
        IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad.
        ADDRESS (int): Data Type: STRING. Complete property address, including postal code.
        CATEGORY (int): Data Type: STRING. Category of local deal used to group like items
            together for recommendation engine.
        CONTEXTUAL_KEYWORDS (int): Data Type: STRING\_LIST. Keywords used for product retrieval.
        FINAL_URLS (int): Data Type: URL\_LIST. Required. Final URLs to be used in ad when using
            Upgraded URLs; the more specific the better (e.g. the individual URL of
            a specific local deal and its location).
        FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
            URLs.
        TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
        ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
            android-app://{package\_id}/{scheme}/{host\_path}. The components are
            defined as follows: package\_id: app ID as specified in Google Play.
            scheme: the scheme to pass to the application. Can be HTTP, or a custom
            scheme. host\_path: identifies the specific content within your
            application.
        SIMILAR_DEAL_IDS (int): Data Type: STRING\_LIST. List of recommended local deal IDs to show
            together with this item.
        IOS_APP_LINK (int): Data Type: STRING. iOS app link.
        IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DEAL_ID = 2
    DEAL_NAME = 3
    SUBTITLE = 4
    DESCRIPTION = 5
    PRICE = 6
    FORMATTED_PRICE = 7
    SALE_PRICE = 8
    FORMATTED_SALE_PRICE = 9
    IMAGE_URL = 10
    ADDRESS = 11
    CATEGORY = 12
    CONTEXTUAL_KEYWORDS = 13
    FINAL_URLS = 14
    FINAL_MOBILE_URLS = 15
    TRACKING_URL = 16
    ANDROID_APP_LINK = 17
    SIMILAR_DEAL_IDS = 18
    IOS_APP_LINK = 19
    IOS_APP_STORE_ID = 20
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
LocalPlaceholderFieldEnum = LocalPlaceholderFieldEnum() # For __getattribute__
class LocationExtensionTargetingCriterionFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    LocationExtensionTargetingCriterionFieldEnum = '''\
class LocationExtensionTargetingCriterionField(enum.IntEnum):
    """
    Possible values for Location Extension Targeting criterion fields.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        ADDRESS_LINE_1 (int): Data Type: STRING. Line 1 of the business address.
        ADDRESS_LINE_2 (int): Data Type: STRING. Line 2 of the business address.
        CITY (int): Data Type: STRING. City of the business address.
        PROVINCE (int): Data Type: STRING. Province of the business address.
        POSTAL_CODE (int): Data Type: STRING. Postal code of the business address.
        COUNTRY_CODE (int): Data Type: STRING. Country code of the business address.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ADDRESS_LINE_1 = 2
    ADDRESS_LINE_2 = 3
    CITY = 4
    PROVINCE = 5
    POSTAL_CODE = 6
    COUNTRY_CODE = 7
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
LocationExtensionTargetingCriterionFieldEnum = LocationExtensionTargetingCriterionFieldEnum() # For __getattribute__
class LocationGroupRadiusUnitsEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    LocationGroupRadiusUnitsEnum = '''\
class LocationGroupRadiusUnits(enum.IntEnum):
    """
    The unit of radius distance in location group (e.g. MILES)
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        METERS (int): Meters
        MILES (int): Miles
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    METERS = 2
    MILES = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
LocationGroupRadiusUnitsEnum = LocationGroupRadiusUnitsEnum() # For __getattribute__
class LocationPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    LocationPlaceholderFieldEnum = '''\
class LocationPlaceholderField(enum.IntEnum):
    """
    Possible values for Location placeholder fields.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        BUSINESS_NAME (int): Data Type: STRING. The name of the business.
        ADDRESS_LINE_1 (int): Data Type: STRING. Line 1 of the business address.
        ADDRESS_LINE_2 (int): Data Type: STRING. Line 2 of the business address.
        CITY (int): Data Type: STRING. City of the business address.
        PROVINCE (int): Data Type: STRING. Province of the business address.
        POSTAL_CODE (int): Data Type: STRING. Postal code of the business address.
        COUNTRY_CODE (int): Data Type: STRING. Country code of the business address.
        PHONE_NUMBER (int): Data Type: STRING. Phone number of the business.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BUSINESS_NAME = 2
    ADDRESS_LINE_1 = 3
    ADDRESS_LINE_2 = 4
    CITY = 5
    PROVINCE = 6
    POSTAL_CODE = 7
    COUNTRY_CODE = 8
    PHONE_NUMBER = 9
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
LocationPlaceholderFieldEnum = LocationPlaceholderFieldEnum() # For __getattribute__
class ManagerLinkErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    # Fix: typo "Manger" -> "Manager" in the DUPLICATE_CHILD_FOUND description;
    # numeric values are untouched.
    ManagerLinkErrorEnum = '''\
class ManagerLinkError(enum.IntEnum):
    """
    Enum describing possible ManagerLink errors.
    Attributes:
        UNSPECIFIED (int): Enum unspecified.
        UNKNOWN (int): The received error code is not known in this version.
        ACCOUNTS_NOT_COMPATIBLE_FOR_LINKING (int): The manager and client have incompatible account types.
        TOO_MANY_MANAGERS (int): Client is already linked to too many managers.
        TOO_MANY_INVITES (int): Manager has too many pending invitations.
        ALREADY_INVITED_BY_THIS_MANAGER (int): Client is already invited by this manager.
        ALREADY_MANAGED_BY_THIS_MANAGER (int): The client is already managed by this manager.
        ALREADY_MANAGED_IN_HIERARCHY (int): Client is already managed in hierarchy.
        DUPLICATE_CHILD_FOUND (int): Manager and sub-manager to be linked have duplicate client.
        CLIENT_HAS_NO_ADMIN_USER (int): Client has no active user that can access the client account.
        MAX_DEPTH_EXCEEDED (int): Adding this link would exceed the maximum hierarchy depth.
        CYCLE_NOT_ALLOWED (int): Adding this link will create a cycle.
        TOO_MANY_ACCOUNTS (int): Manager account has the maximum number of linked clients.
        TOO_MANY_ACCOUNTS_AT_MANAGER (int): Parent manager account has the maximum number of linked clients.
        NON_OWNER_USER_CANNOT_MODIFY_LINK (int): The account is not authorized owner.
        SUSPENDED_ACCOUNT_CANNOT_ADD_CLIENTS (int): Your manager account is suspended, and you are no longer allowed to link
            to clients.
        CLIENT_OUTSIDE_TREE (int): You are not allowed to move a client to a manager that is not under your
            current hierarchy.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ACCOUNTS_NOT_COMPATIBLE_FOR_LINKING = 2
    TOO_MANY_MANAGERS = 3
    TOO_MANY_INVITES = 4
    ALREADY_INVITED_BY_THIS_MANAGER = 5
    ALREADY_MANAGED_BY_THIS_MANAGER = 6
    ALREADY_MANAGED_IN_HIERARCHY = 7
    DUPLICATE_CHILD_FOUND = 8
    CLIENT_HAS_NO_ADMIN_USER = 9
    MAX_DEPTH_EXCEEDED = 10
    CYCLE_NOT_ALLOWED = 11
    TOO_MANY_ACCOUNTS = 12
    TOO_MANY_ACCOUNTS_AT_MANAGER = 13
    NON_OWNER_USER_CANNOT_MODIFY_LINK = 14
    SUSPENDED_ACCOUNT_CANNOT_ADD_CLIENTS = 15
    CLIENT_OUTSIDE_TREE = 16
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
ManagerLinkErrorEnum = ManagerLinkErrorEnum() # For __getattribute__
class ManagerLinkStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    ManagerLinkStatusEnum = '''\
class ManagerLinkStatus(enum.IntEnum):
    """
    Possible statuses of a link.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        ACTIVE (int): Indicates current in-effect relationship
        INACTIVE (int): Indicates terminated relationship
        PENDING (int): Indicates relationship has been requested by manager, but the client
            hasn't accepted yet.
        REFUSED (int): Relationship was requested by the manager, but the client has refused.
        CANCELED (int): Indicates relationship has been requested by manager, but manager
            canceled it.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ACTIVE = 2
    INACTIVE = 3
    PENDING = 4
    REFUSED = 5
    CANCELED = 6
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
ManagerLinkStatusEnum = ManagerLinkStatusEnum() # For __getattribute__
class MatchingFunctionContextTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    MatchingFunctionContextTypeEnum = '''\
class MatchingFunctionContextType(enum.IntEnum):
    """
    Possible context types for an operand in a matching function.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        FEED_ITEM_ID (int): Feed item id in the request context.
        DEVICE_NAME (int): The device being used (possible values are 'Desktop' or 'Mobile').
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    FEED_ITEM_ID = 2
    DEVICE_NAME = 3
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
MatchingFunctionContextTypeEnum = MatchingFunctionContextTypeEnum() # For __getattribute__
class MatchingFunctionOperatorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source string, compiled lazily by _CreateEnumTypeUponFirstAccess
    # (defined elsewhere in this file) on first attribute access, per its name.
    MatchingFunctionOperatorEnum = '''\
class MatchingFunctionOperator(enum.IntEnum):
    """
    Possible operators in a matching function.
    Attributes:
        UNSPECIFIED (int): Not specified.
        UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        IN (int): The IN operator.
        IDENTITY (int): The IDENTITY operator.
        EQUALS (int): The EQUALS operator
        AND (int): Operator that takes two or more operands that are of type
            FunctionOperand and checks that all the operands evaluate to true. For
            functions related to ad formats, all the operands must be in
            left\_operands.
        CONTAINS_ANY (int): Operator that returns true if the elements in left\_operands contain any
            of the elements in right\_operands. Otherwise, return false. The
            right\_operands must contain at least 1 and no more than 3
            ConstantOperands.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    IN = 2
    IDENTITY = 3
    EQUALS = 4
    AND = 5
    CONTAINS_ANY = 6
'''
# Replace the class with an instance so attribute access is routed through
# the base class's __getattribute__.
MatchingFunctionOperatorEnum = MatchingFunctionOperatorEnum() # For __getattribute__
class MediaBundleErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MediaBundleError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
# NOTE(review): value 2 is absent from the payload; this mirrors the upstream
# proto numbering and must not be "fixed" here.
MediaBundleErrorEnum = '''\
class MediaBundleError(enum.IntEnum):
"""
Enum describing possible media bundle errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
BAD_REQUEST (int): There was a problem with the request.
DOUBLECLICK_BUNDLE_NOT_ALLOWED (int): HTML5 ads using DoubleClick Studio created ZIP files are not supported.
EXTERNAL_URL_NOT_ALLOWED (int): Cannot reference URL external to the media bundle.
FILE_TOO_LARGE (int): Media bundle file is too large.
GOOGLE_WEB_DESIGNER_ZIP_FILE_NOT_PUBLISHED (int): ZIP file from Google Web Designer is not published.
INVALID_INPUT (int): Input was invalid.
INVALID_MEDIA_BUNDLE (int): There was a problem with the media bundle.
INVALID_MEDIA_BUNDLE_ENTRY (int): There was a problem with one or more of the media bundle entries.
INVALID_MIME_TYPE (int): The media bundle contains a file with an unknown mime type
INVALID_PATH (int): The media bundle contain an invalid asset path.
INVALID_URL_REFERENCE (int): HTML5 ad is trying to reference an asset not in .ZIP file
MEDIA_DATA_TOO_LARGE (int): Media data is too large.
MISSING_PRIMARY_MEDIA_BUNDLE_ENTRY (int): The media bundle contains no primary entry.
SERVER_ERROR (int): There was an error on the server.
STORAGE_ERROR (int): The image could not be stored.
SWIFFY_BUNDLE_NOT_ALLOWED (int): Media bundle created with the Swiffy tool is not allowed.
TOO_MANY_FILES (int): The media bundle contains too many files.
UNEXPECTED_SIZE (int): The media bundle is not of legal dimensions.
UNSUPPORTED_GOOGLE_WEB_DESIGNER_ENVIRONMENT (int): Google Web Designer not created for "Google Ads" environment.
UNSUPPORTED_HTML5_FEATURE (int): Unsupported HTML5 feature in HTML5 asset.
URL_IN_MEDIA_BUNDLE_NOT_SSL_COMPLIANT (int): URL in HTML5 entry is not ssl compliant.
CUSTOM_EXIT_NOT_ALLOWED (int): Custom exits not allowed in HTML5 entry.
"""
UNSPECIFIED = 0
UNKNOWN = 1
BAD_REQUEST = 3
DOUBLECLICK_BUNDLE_NOT_ALLOWED = 4
EXTERNAL_URL_NOT_ALLOWED = 5
FILE_TOO_LARGE = 6
GOOGLE_WEB_DESIGNER_ZIP_FILE_NOT_PUBLISHED = 7
INVALID_INPUT = 8
INVALID_MEDIA_BUNDLE = 9
INVALID_MEDIA_BUNDLE_ENTRY = 10
INVALID_MIME_TYPE = 11
INVALID_PATH = 12
INVALID_URL_REFERENCE = 13
MEDIA_DATA_TOO_LARGE = 14
MISSING_PRIMARY_MEDIA_BUNDLE_ENTRY = 15
SERVER_ERROR = 16
STORAGE_ERROR = 17
SWIFFY_BUNDLE_NOT_ALLOWED = 18
TOO_MANY_FILES = 19
UNEXPECTED_SIZE = 20
UNSUPPORTED_GOOGLE_WEB_DESIGNER_ENVIRONMENT = 21
UNSUPPORTED_HTML5_FEATURE = 22
URL_IN_MEDIA_BUNDLE_NOT_SSL_COMPLIANT = 23
CUSTOM_EXIT_NOT_ALLOWED = 24
'''
MediaBundleErrorEnum = MediaBundleErrorEnum() # For __getattribute__
class MediaFileErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MediaFileError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
MediaFileErrorEnum = '''\
class MediaFileError(enum.IntEnum):
"""
Enum describing possible media file errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
CANNOT_CREATE_STANDARD_ICON (int): Cannot create a standard icon type.
CANNOT_SELECT_STANDARD_ICON_WITH_OTHER_TYPES (int): May only select Standard Icons alone.
CANNOT_SPECIFY_MEDIA_FILE_ID_AND_DATA (int): Image contains both a media file ID and data.
DUPLICATE_MEDIA (int): A media file with given type and reference ID already exists.
EMPTY_FIELD (int): A required field was not specified or is an empty string.
RESOURCE_REFERENCED_IN_MULTIPLE_OPS (int): A media file may only be modified once per call.
FIELD_NOT_SUPPORTED_FOR_MEDIA_SUB_TYPE (int): Field is not supported for the media sub type.
INVALID_MEDIA_FILE_ID (int): The media file ID is invalid.
INVALID_MEDIA_SUB_TYPE (int): The media subtype is invalid.
INVALID_MEDIA_FILE_TYPE (int): The media file type is invalid.
INVALID_MIME_TYPE (int): The mimetype is invalid.
INVALID_REFERENCE_ID (int): The media reference ID is invalid.
INVALID_YOU_TUBE_ID (int): The YouTube video ID is invalid.
MEDIA_FILE_FAILED_TRANSCODING (int): Media file has failed transcoding
MEDIA_NOT_TRANSCODED (int): Media file has not been transcoded.
MEDIA_TYPE_DOES_NOT_MATCH_MEDIA_FILE_TYPE (int): The media type does not match the actual media file's type.
NO_FIELDS_SPECIFIED (int): None of the fields have been specified.
NULL_REFERENCE_ID_AND_MEDIA_ID (int): One of reference ID or media file ID must be specified.
TOO_LONG (int): The string has too many characters.
UNSUPPORTED_TYPE (int): The specified type is not supported.
YOU_TUBE_SERVICE_UNAVAILABLE (int): YouTube is unavailable for requesting video data.
YOU_TUBE_VIDEO_HAS_NON_POSITIVE_DURATION (int): The YouTube video has a non positive duration.
YOU_TUBE_VIDEO_NOT_FOUND (int): The YouTube video ID is syntactically valid but the video was not found.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CANNOT_CREATE_STANDARD_ICON = 2
CANNOT_SELECT_STANDARD_ICON_WITH_OTHER_TYPES = 3
CANNOT_SPECIFY_MEDIA_FILE_ID_AND_DATA = 4
DUPLICATE_MEDIA = 5
EMPTY_FIELD = 6
RESOURCE_REFERENCED_IN_MULTIPLE_OPS = 7
FIELD_NOT_SUPPORTED_FOR_MEDIA_SUB_TYPE = 8
INVALID_MEDIA_FILE_ID = 9
INVALID_MEDIA_SUB_TYPE = 10
INVALID_MEDIA_FILE_TYPE = 11
INVALID_MIME_TYPE = 12
INVALID_REFERENCE_ID = 13
INVALID_YOU_TUBE_ID = 14
MEDIA_FILE_FAILED_TRANSCODING = 15
MEDIA_NOT_TRANSCODED = 16
MEDIA_TYPE_DOES_NOT_MATCH_MEDIA_FILE_TYPE = 17
NO_FIELDS_SPECIFIED = 18
NULL_REFERENCE_ID_AND_MEDIA_ID = 19
TOO_LONG = 20
UNSUPPORTED_TYPE = 21
YOU_TUBE_SERVICE_UNAVAILABLE = 22
YOU_TUBE_VIDEO_HAS_NON_POSITIVE_DURATION = 23
YOU_TUBE_VIDEO_NOT_FOUND = 24
'''
MediaFileErrorEnum = MediaFileErrorEnum() # For __getattribute__
class MediaTypeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MediaType IntEnum; the base class appears to
# compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
MediaTypeEnum = '''\
class MediaType(enum.IntEnum):
"""
The type of media.
Attributes:
UNSPECIFIED (int): The media type has not been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
IMAGE (int): Static image, used for image ad.
ICON (int): Small image, used for map ad.
MEDIA_BUNDLE (int): ZIP file, used in fields of template ads.
AUDIO (int): Audio file.
VIDEO (int): Video file.
DYNAMIC_IMAGE (int): Animated image, such as animated GIF.
"""
UNSPECIFIED = 0
UNKNOWN = 1
IMAGE = 2
ICON = 3
MEDIA_BUNDLE = 4
AUDIO = 5
VIDEO = 6
DYNAMIC_IMAGE = 7
'''
MediaTypeEnum = MediaTypeEnum() # For __getattribute__
class MediaUploadErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MediaUploadError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
MediaUploadErrorEnum = '''\
class MediaUploadError(enum.IntEnum):
"""
Enum describing possible media uploading errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
FILE_TOO_BIG (int): The uploaded file is too big.
UNPARSEABLE_IMAGE (int): Image data is unparseable.
ANIMATED_IMAGE_NOT_ALLOWED (int): Animated images are not allowed.
FORMAT_NOT_ALLOWED (int): The image or media bundle format is not allowed.
EXTERNAL_URL_NOT_ALLOWED (int): Cannot reference URL external to the media bundle.
INVALID_URL_REFERENCE (int): HTML5 ad is trying to reference an asset not in .ZIP file.
MISSING_PRIMARY_MEDIA_BUNDLE_ENTRY (int): The media bundle contains no primary entry.
"""
UNSPECIFIED = 0
UNKNOWN = 1
FILE_TOO_BIG = 2
UNPARSEABLE_IMAGE = 3
ANIMATED_IMAGE_NOT_ALLOWED = 4
FORMAT_NOT_ALLOWED = 5
EXTERNAL_URL_NOT_ALLOWED = 6
INVALID_URL_REFERENCE = 7
MISSING_PRIMARY_MEDIA_BUNDLE_ENTRY = 8
'''
MediaUploadErrorEnum = MediaUploadErrorEnum() # For __getattribute__
class MerchantCenterLinkStatusEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MerchantCenterLinkStatus IntEnum; the base
# class appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
MerchantCenterLinkStatusEnum = '''\
class MerchantCenterLinkStatus(enum.IntEnum):
"""
Describes the possible statuses for a link between a Google Ads customer
and a Google Merchant Center account.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
ENABLED (int): The link is enabled.
PENDING (int): The link has no effect. It was proposed by the Merchant Center Account
owner and hasn't been confirmed by the customer.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
PENDING = 3
'''
MerchantCenterLinkStatusEnum = MerchantCenterLinkStatusEnum() # For __getattribute__
class MessagePlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MessagePlaceholderField IntEnum; the base
# class appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
MessagePlaceholderFieldEnum = '''\
class MessagePlaceholderField(enum.IntEnum):
"""
Possible values for Message placeholder fields.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
BUSINESS_NAME (int): Data Type: STRING. The name of your business.
COUNTRY_CODE (int): Data Type: STRING. Country code of phone number.
PHONE_NUMBER (int): Data Type: STRING. A phone number that's capable of sending and receiving
text messages.
MESSAGE_EXTENSION_TEXT (int): Data Type: STRING. The text that will go in your click-to-message ad.
MESSAGE_TEXT (int): Data Type: STRING. The message text automatically shows in people's
messaging apps when they tap to send you a message.
"""
UNSPECIFIED = 0
UNKNOWN = 1
BUSINESS_NAME = 2
COUNTRY_CODE = 3
PHONE_NUMBER = 4
MESSAGE_EXTENSION_TEXT = 5
MESSAGE_TEXT = 6
'''
MessagePlaceholderFieldEnum = MessagePlaceholderFieldEnum() # For __getattribute__
class MimeTypeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MimeType IntEnum; the base class appears to
# compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
MimeTypeEnum = '''\
class MimeType(enum.IntEnum):
"""
The mime type
Attributes:
UNSPECIFIED (int): The mime type has not been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
IMAGE_JPEG (int): MIME type of image/jpeg.
IMAGE_GIF (int): MIME type of image/gif.
IMAGE_PNG (int): MIME type of image/png.
FLASH (int): MIME type of application/x-shockwave-flash.
TEXT_HTML (int): MIME type of text/html.
PDF (int): MIME type of application/pdf.
MSWORD (int): MIME type of application/msword.
MSEXCEL (int): MIME type of application/vnd.ms-excel.
RTF (int): MIME type of application/rtf.
AUDIO_WAV (int): MIME type of audio/wav.
AUDIO_MP3 (int): MIME type of audio/mp3.
HTML5_AD_ZIP (int): MIME type of application/x-html5-ad-zip.
"""
UNSPECIFIED = 0
UNKNOWN = 1
IMAGE_JPEG = 2
IMAGE_GIF = 3
IMAGE_PNG = 4
FLASH = 5
TEXT_HTML = 6
PDF = 7
MSWORD = 8
MSEXCEL = 9
RTF = 10
AUDIO_WAV = 11
AUDIO_MP3 = 12
HTML5_AD_ZIP = 13
'''
MimeTypeEnum = MimeTypeEnum() # For __getattribute__
class MinuteOfHourEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MinuteOfHour IntEnum; the base class appears
# to compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
MinuteOfHourEnum = '''\
class MinuteOfHour(enum.IntEnum):
"""
Enumerates of quarter-hours. E.g. "FIFTEEN"
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): The value is unknown in this version.
ZERO (int): Zero minutes past the hour.
FIFTEEN (int): Fifteen minutes past the hour.
THIRTY (int): Thirty minutes past the hour.
FORTY_FIVE (int): Forty-five minutes past the hour.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ZERO = 2
FIFTEEN = 3
THIRTY = 4
FORTY_FIVE = 5
'''
MinuteOfHourEnum = MinuteOfHourEnum() # For __getattribute__
class MobileDeviceTypeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MobileDeviceType IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
MobileDeviceTypeEnum = '''\
class MobileDeviceType(enum.IntEnum):
"""
The type of mobile device.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
MOBILE (int): Mobile phones.
TABLET (int): Tablets.
"""
UNSPECIFIED = 0
UNKNOWN = 1
MOBILE = 2
TABLET = 3
'''
MobileDeviceTypeEnum = MobileDeviceTypeEnum() # For __getattribute__
class MonthOfYearEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MonthOfYear IntEnum; the base class appears
# to compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
# NOTE(review): month values are offset by one (JANUARY = 2) because 0/1 are
# reserved for UNSPECIFIED/UNKNOWN -- do not treat them as calendar ordinals.
MonthOfYearEnum = '''\
class MonthOfYear(enum.IntEnum):
"""
Enumerates months of the year, e.g., "January".
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): The value is unknown in this version.
JANUARY (int): January.
FEBRUARY (int): February.
MARCH (int): March.
APRIL (int): April.
MAY (int): May.
JUNE (int): June.
JULY (int): July.
AUGUST (int): August.
SEPTEMBER (int): September.
OCTOBER (int): October.
NOVEMBER (int): November.
DECEMBER (int): December.
"""
UNSPECIFIED = 0
UNKNOWN = 1
JANUARY = 2
FEBRUARY = 3
MARCH = 4
APRIL = 5
MAY = 6
JUNE = 7
JULY = 8
AUGUST = 9
SEPTEMBER = 10
OCTOBER = 11
NOVEMBER = 12
DECEMBER = 13
'''
MonthOfYearEnum = MonthOfYearEnum() # For __getattribute__
class MultiplierErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MultiplierError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
MultiplierErrorEnum = '''\
class MultiplierError(enum.IntEnum):
"""
Enum describing possible multiplier errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
MULTIPLIER_TOO_HIGH (int): Multiplier value is too high
MULTIPLIER_TOO_LOW (int): Multiplier value is too low
TOO_MANY_FRACTIONAL_DIGITS (int): Too many fractional digits
MULTIPLIER_NOT_ALLOWED_FOR_BIDDING_STRATEGY (int): A multiplier cannot be set for this bidding strategy
MULTIPLIER_NOT_ALLOWED_WHEN_BASE_BID_IS_MISSING (int): A multiplier cannot be set when there is no base bid (e.g., content max
cpc)
NO_MULTIPLIER_SPECIFIED (int): A bid multiplier must be specified
MULTIPLIER_CAUSES_BID_TO_EXCEED_DAILY_BUDGET (int): Multiplier causes bid to exceed daily budget
MULTIPLIER_CAUSES_BID_TO_EXCEED_MONTHLY_BUDGET (int): Multiplier causes bid to exceed monthly budget
MULTIPLIER_CAUSES_BID_TO_EXCEED_CUSTOM_BUDGET (int): Multiplier causes bid to exceed custom budget
MULTIPLIER_CAUSES_BID_TO_EXCEED_MAX_ALLOWED_BID (int): Multiplier causes bid to exceed maximum allowed bid
BID_LESS_THAN_MIN_ALLOWED_BID_WITH_MULTIPLIER (int): Multiplier causes bid to become less than the minimum bid allowed
MULTIPLIER_AND_BIDDING_STRATEGY_TYPE_MISMATCH (int): Multiplier type (cpc vs. cpm) needs to match campaign's bidding strategy
"""
UNSPECIFIED = 0
UNKNOWN = 1
MULTIPLIER_TOO_HIGH = 2
MULTIPLIER_TOO_LOW = 3
TOO_MANY_FRACTIONAL_DIGITS = 4
MULTIPLIER_NOT_ALLOWED_FOR_BIDDING_STRATEGY = 5
MULTIPLIER_NOT_ALLOWED_WHEN_BASE_BID_IS_MISSING = 6
NO_MULTIPLIER_SPECIFIED = 7
MULTIPLIER_CAUSES_BID_TO_EXCEED_DAILY_BUDGET = 8
MULTIPLIER_CAUSES_BID_TO_EXCEED_MONTHLY_BUDGET = 9
MULTIPLIER_CAUSES_BID_TO_EXCEED_CUSTOM_BUDGET = 10
MULTIPLIER_CAUSES_BID_TO_EXCEED_MAX_ALLOWED_BID = 11
BID_LESS_THAN_MIN_ALLOWED_BID_WITH_MULTIPLIER = 12
MULTIPLIER_AND_BIDDING_STRATEGY_TYPE_MISMATCH = 13
'''
MultiplierErrorEnum = MultiplierErrorEnum() # For __getattribute__
class MutateErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MutateError IntEnum; the base class appears
# to compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
# NOTE(review): values 2, 4-6 are absent; this mirrors upstream proto
# numbering and must not be "fixed" here.
MutateErrorEnum = '''\
class MutateError(enum.IntEnum):
"""
Enum describing possible mutate errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
RESOURCE_NOT_FOUND (int): Requested resource was not found.
ID_EXISTS_IN_MULTIPLE_MUTATES (int): Cannot mutate the same resource twice in one request.
INCONSISTENT_FIELD_VALUES (int): The field's contents don't match another field that represents the same
data.
MUTATE_NOT_ALLOWED (int): Mutates are not allowed for the requested resource.
RESOURCE_NOT_IN_GOOGLE_ADS (int): The resource isn't in Google Ads. It belongs to another ads system.
RESOURCE_ALREADY_EXISTS (int): The resource being created already exists.
"""
UNSPECIFIED = 0
UNKNOWN = 1
RESOURCE_NOT_FOUND = 3
ID_EXISTS_IN_MULTIPLE_MUTATES = 7
INCONSISTENT_FIELD_VALUES = 8
MUTATE_NOT_ALLOWED = 9
RESOURCE_NOT_IN_GOOGLE_ADS = 10
RESOURCE_ALREADY_EXISTS = 11
'''
MutateErrorEnum = MutateErrorEnum() # For __getattribute__
class MutateJobErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MutateJobError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
MutateJobErrorEnum = '''\
class MutateJobError(enum.IntEnum):
"""
Enum describing possible request errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
CANNOT_MODIFY_JOB_AFTER_JOB_STARTS_RUNNING (int): The mutate job cannot add more operations or run after it has started
running.
EMPTY_OPERATIONS (int): The operations for an AddMutateJobOperations request were empty.
INVALID_SEQUENCE_TOKEN (int): The sequence token for an AddMutateJobOperations request was invalid.
RESULTS_NOT_READY (int): Mutate Job Results can only be retrieved once the job is finished.
INVALID_PAGE_SIZE (int): The page size for ListMutateJobResults was invalid.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CANNOT_MODIFY_JOB_AFTER_JOB_STARTS_RUNNING = 2
EMPTY_OPERATIONS = 3
INVALID_SEQUENCE_TOKEN = 4
RESULTS_NOT_READY = 5
INVALID_PAGE_SIZE = 6
'''
MutateJobErrorEnum = MutateJobErrorEnum() # For __getattribute__
class MutateJobStatusEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the MutateJobStatus IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
MutateJobStatusEnum = '''\
class MutateJobStatus(enum.IntEnum):
"""
The mutate job statuses.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
PENDING (int): The job is not currently running.
RUNNING (int): The job is running.
DONE (int): The job is done.
"""
UNSPECIFIED = 0
UNKNOWN = 1
PENDING = 2
RUNNING = 3
DONE = 4
'''
MutateJobStatusEnum = MutateJobStatusEnum() # For __getattribute__
class NegativeGeoTargetTypeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the NegativeGeoTargetType IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
NegativeGeoTargetTypeEnum = '''\
class NegativeGeoTargetType(enum.IntEnum):
"""
The possible negative geo target types.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): The value is unknown in this version.
PRESENCE_OR_INTEREST (int): Specifies that a user is excluded from seeing the ad if they
are in, or show interest in, advertiser's excluded locations.
PRESENCE (int): Specifies that a user is excluded from seeing the ad if they
are in advertiser's excluded locations.
"""
UNSPECIFIED = 0
UNKNOWN = 1
PRESENCE_OR_INTEREST = 4
PRESENCE = 5
'''
NegativeGeoTargetTypeEnum = NegativeGeoTargetTypeEnum() # For __getattribute__
class NewResourceCreationErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the NewResourceCreationError IntEnum; the base
# class appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
NewResourceCreationErrorEnum = '''\
class NewResourceCreationError(enum.IntEnum):
"""
Enum describing possible new resource creation errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
CANNOT_SET_ID_FOR_CREATE (int): Do not set the id field while creating new resources.
DUPLICATE_TEMP_IDS (int): Creating more than one resource with the same temp ID is not allowed.
TEMP_ID_RESOURCE_HAD_ERRORS (int): Parent resource with specified temp ID failed validation, so no
validation will be done for this child resource.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CANNOT_SET_ID_FOR_CREATE = 2
DUPLICATE_TEMP_IDS = 3
TEMP_ID_RESOURCE_HAD_ERRORS = 4
'''
NewResourceCreationErrorEnum = NewResourceCreationErrorEnum() # For __getattribute__
class NotEmptyErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the NotEmptyError IntEnum; the base class appears
# to compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
NotEmptyErrorEnum = '''\
class NotEmptyError(enum.IntEnum):
"""
Enum describing possible not empty errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
EMPTY_LIST (int): Empty list.
"""
UNSPECIFIED = 0
UNKNOWN = 1
EMPTY_LIST = 2
'''
NotEmptyErrorEnum = NotEmptyErrorEnum() # For __getattribute__
class NotWhitelistedErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the NotWhitelistedError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
NotWhitelistedErrorEnum = '''\
class NotWhitelistedError(enum.IntEnum):
"""
Enum describing possible not whitelisted errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
CUSTOMER_NOT_WHITELISTED_FOR_THIS_FEATURE (int): Customer is not whitelisted for accessing this feature.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CUSTOMER_NOT_WHITELISTED_FOR_THIS_FEATURE = 2
'''
NotWhitelistedErrorEnum = NotWhitelistedErrorEnum() # For __getattribute__
class NullErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the NullError IntEnum; the base class appears to
# compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
NullErrorEnum = '''\
class NullError(enum.IntEnum):
"""
Enum describing possible null errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
NULL_CONTENT (int): Specified list/container must not contain any null elements
"""
UNSPECIFIED = 0
UNKNOWN = 1
NULL_CONTENT = 2
'''
NullErrorEnum = NullErrorEnum() # For __getattribute__
class OperatingSystemVersionOperatorTypeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the OperatingSystemVersionOperatorType IntEnum;
# the base class appears to compile it into a real enum type lazily on first
# attribute access (hence the trailing instantiation) -- confirm in the base.
OperatingSystemVersionOperatorTypeEnum = '''\
class OperatingSystemVersionOperatorType(enum.IntEnum):
"""
The type of operating system version.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
EQUALS_TO (int): Equals to the specified version.
GREATER_THAN_EQUALS_TO (int): Greater than or equals to the specified version.
"""
UNSPECIFIED = 0
UNKNOWN = 1
EQUALS_TO = 2
GREATER_THAN_EQUALS_TO = 4
'''
OperatingSystemVersionOperatorTypeEnum = OperatingSystemVersionOperatorTypeEnum() # For __getattribute__
class OperationAccessDeniedErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the OperationAccessDeniedError IntEnum; the base
# class appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
OperationAccessDeniedErrorEnum = '''\
class OperationAccessDeniedError(enum.IntEnum):
"""
Enum describing possible operation access denied errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
ACTION_NOT_PERMITTED (int): Unauthorized invocation of a service's method (get, mutate, etc.)
CREATE_OPERATION_NOT_PERMITTED (int): Unauthorized CREATE operation in invoking a service's mutate method.
REMOVE_OPERATION_NOT_PERMITTED (int): Unauthorized REMOVE operation in invoking a service's mutate method.
UPDATE_OPERATION_NOT_PERMITTED (int): Unauthorized UPDATE operation in invoking a service's mutate method.
MUTATE_ACTION_NOT_PERMITTED_FOR_CLIENT (int): A mutate action is not allowed on this campaign, from this client.
OPERATION_NOT_PERMITTED_FOR_CAMPAIGN_TYPE (int): This operation is not permitted on this campaign type
CREATE_AS_REMOVED_NOT_PERMITTED (int): A CREATE operation may not set status to REMOVED.
OPERATION_NOT_PERMITTED_FOR_REMOVED_RESOURCE (int): This operation is not allowed because the campaign or adgroup is removed.
OPERATION_NOT_PERMITTED_FOR_AD_GROUP_TYPE (int): This operation is not permitted on this ad group type.
MUTATE_NOT_PERMITTED_FOR_CUSTOMER (int): The mutate is not allowed for this customer.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ACTION_NOT_PERMITTED = 2
CREATE_OPERATION_NOT_PERMITTED = 3
REMOVE_OPERATION_NOT_PERMITTED = 4
UPDATE_OPERATION_NOT_PERMITTED = 5
MUTATE_ACTION_NOT_PERMITTED_FOR_CLIENT = 6
OPERATION_NOT_PERMITTED_FOR_CAMPAIGN_TYPE = 7
CREATE_AS_REMOVED_NOT_PERMITTED = 8
OPERATION_NOT_PERMITTED_FOR_REMOVED_RESOURCE = 9
OPERATION_NOT_PERMITTED_FOR_AD_GROUP_TYPE = 10
MUTATE_NOT_PERMITTED_FOR_CUSTOMER = 11
'''
OperationAccessDeniedErrorEnum = OperationAccessDeniedErrorEnum() # For __getattribute__
class OperatorErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the OperatorError IntEnum; the base class appears
# to compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
OperatorErrorEnum = '''\
class OperatorError(enum.IntEnum):
"""
Enum describing possible operator errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
OPERATOR_NOT_SUPPORTED (int): Operator not supported.
"""
UNSPECIFIED = 0
UNKNOWN = 1
OPERATOR_NOT_SUPPORTED = 2
'''
OperatorErrorEnum = OperatorErrorEnum() # For __getattribute__
class ParentalStatusTypeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the ParentalStatusType IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
# NOTE(review): values start at 300 per the upstream criterion-ID numbering;
# do not renumber.
ParentalStatusTypeEnum = '''\
class ParentalStatusType(enum.IntEnum):
"""
The type of parental statuses (e.g. not a parent).
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
PARENT (int): Parent.
NOT_A_PARENT (int): Not a parent.
UNDETERMINED (int): Undetermined parental status.
"""
UNSPECIFIED = 0
UNKNOWN = 1
PARENT = 300
NOT_A_PARENT = 301
UNDETERMINED = 302
'''
ParentalStatusTypeEnum = ParentalStatusTypeEnum() # For __getattribute__
class PartialFailureErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the PartialFailureError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
PartialFailureErrorEnum = '''\
class PartialFailureError(enum.IntEnum):
"""
Enum describing possible partial failure errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
PARTIAL_FAILURE_MODE_REQUIRED (int): The partial failure field was false in the request.
This method requires this field be set to true.
"""
UNSPECIFIED = 0
UNKNOWN = 1
PARTIAL_FAILURE_MODE_REQUIRED = 2
'''
PartialFailureErrorEnum = PartialFailureErrorEnum() # For __getattribute__
class PaymentModeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the PaymentMode IntEnum; the base class appears
# to compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
PaymentModeEnum = '''\
class PaymentMode(enum.IntEnum):
"""
Enum describing possible payment modes.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
CLICKS (int): Pay per click.
CONVERSION_VALUE (int): Pay per conversion value. This mode is only supported by campaigns with
AdvertisingChannelType.HOTEL, BiddingStrategyType.COMMISSION, and
BudgetType.HOTEL\_ADS\_COMMISSION.
CONVERSIONS (int): Pay per conversion. This mode is only supported by campaigns with
AdvertisingChannelType.DISPLAY (excluding
AdvertisingChannelSubType.DISPLAY\_GMAIL),
BiddingStrategyType.TARGET\_CPA, and BudgetType.FIXED\_CPA. The customer
must also be eligible for this mode. See
Customer.eligibility\_failure\_reasons for details.
GUEST_STAY (int): Pay per guest stay value. This mode is only supported by campaigns with
AdvertisingChannelType.HOTEL, BiddingStrategyType.COMMISSION, and
BudgetType.HOTEL\_ADS\_COMMISSION.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CLICKS = 4
CONVERSION_VALUE = 5
CONVERSIONS = 6
GUEST_STAY = 7
'''
PaymentModeEnum = PaymentModeEnum() # For __getattribute__
class PaymentsAccountErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the PaymentsAccountError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
PaymentsAccountErrorEnum = '''\
class PaymentsAccountError(enum.IntEnum):
"""
Enum describing possible errors in payments account service.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
NOT_SUPPORTED_FOR_MANAGER_CUSTOMER (int): Manager customers are not supported for payments account service.
"""
UNSPECIFIED = 0
UNKNOWN = 1
NOT_SUPPORTED_FOR_MANAGER_CUSTOMER = 2
'''
PaymentsAccountErrorEnum = PaymentsAccountErrorEnum() # For __getattribute__
class PlaceholderTypeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the PlaceholderType IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
PlaceholderTypeEnum = '''\
class PlaceholderType(enum.IntEnum):
"""
Possible placeholder types for a feed mapping.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
SITELINK (int): Lets you show links in your ad to pages from your website, including the
main landing page.
CALL (int): Lets you attach a phone number to an ad, allowing customers to call
directly from the ad.
APP (int): Lets you provide users with a link that points to a mobile app in
addition to a website.
LOCATION (int): Lets you show locations of businesses from your Google My Business
account in your ad. This helps people find your locations by showing your
ads with your address, a map to your location, or the distance to your
business. This extension type is useful to draw customers to your
brick-and-mortar location.
AFFILIATE_LOCATION (int): If you sell your product through retail chains, affiliate location
extensions let you show nearby stores that carry your products.
CALLOUT (int): Lets you include additional text with your search ads that provide
detailed information about your business, including products and services
you offer. Callouts appear in ads at the top and bottom of Google search
results.
STRUCTURED_SNIPPET (int): Lets you add more info to your ad, specific to some predefined categories
such as types, brands, styles, etc. A minimum of 3 text (SNIPPETS) values
are required.
MESSAGE (int): Allows users to see your ad, click an icon, and contact you directly by
text message. With one tap on your ad, people can contact you to book an
appointment, get a quote, ask for information, or request a service.
PRICE (int): Lets you display prices for a list of items along with your ads. A price
feed is composed of three to eight price table rows.
PROMOTION (int): Allows you to highlight sales and other promotions that let users see how
they can save by buying now.
AD_CUSTOMIZER (int): Lets you dynamically inject custom data into the title and description
of your ads.
DYNAMIC_EDUCATION (int): Indicates that this feed is for education dynamic remarketing.
DYNAMIC_FLIGHT (int): Indicates that this feed is for flight dynamic remarketing.
DYNAMIC_CUSTOM (int): Indicates that this feed is for a custom dynamic remarketing type. Use
this only if the other business types don't apply to your products or
services.
DYNAMIC_HOTEL (int): Indicates that this feed is for hotels and rentals dynamic remarketing.
DYNAMIC_REAL_ESTATE (int): Indicates that this feed is for real estate dynamic remarketing.
DYNAMIC_TRAVEL (int): Indicates that this feed is for travel dynamic remarketing.
DYNAMIC_LOCAL (int): Indicates that this feed is for local deals dynamic remarketing.
DYNAMIC_JOB (int): Indicates that this feed is for job dynamic remarketing.
"""
UNSPECIFIED = 0
UNKNOWN = 1
SITELINK = 2
CALL = 3
APP = 4
LOCATION = 5
AFFILIATE_LOCATION = 6
CALLOUT = 7
STRUCTURED_SNIPPET = 8
MESSAGE = 9
PRICE = 10
PROMOTION = 11
AD_CUSTOMIZER = 12
DYNAMIC_EDUCATION = 13
DYNAMIC_FLIGHT = 14
DYNAMIC_CUSTOM = 15
DYNAMIC_HOTEL = 16
DYNAMIC_REAL_ESTATE = 17
DYNAMIC_TRAVEL = 18
DYNAMIC_LOCAL = 19
DYNAMIC_JOB = 20
'''
PlaceholderTypeEnum = PlaceholderTypeEnum() # For __getattribute__
class PlacementTypeEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the PlacementType IntEnum; the base class appears
# to compile it into a real enum type lazily on first attribute access (hence
# the trailing instantiation) -- confirm against the base class.
PlacementTypeEnum = '''\
class PlacementType(enum.IntEnum):
"""
Possible placement types for a feed mapping.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
WEBSITE (int): Websites(e.g. 'www.flowers4sale.com').
MOBILE_APP_CATEGORY (int): Mobile application categories(e.g. 'Games').
MOBILE_APPLICATION (int): mobile applications(e.g. 'mobileapp::2-com.whatsthewordanswers').
YOUTUBE_VIDEO (int): YouTube videos(e.g. 'youtube.com/video/wtLJPvx7-ys').
YOUTUBE_CHANNEL (int): YouTube channels(e.g. 'youtube.com::L8ZULXASCc1I\_oaOT0NaOQ').
"""
UNSPECIFIED = 0
UNKNOWN = 1
WEBSITE = 2
MOBILE_APP_CATEGORY = 3
MOBILE_APPLICATION = 4
YOUTUBE_VIDEO = 5
YOUTUBE_CHANNEL = 6
'''
PlacementTypeEnum = PlacementTypeEnum() # For __getattribute__
class PolicyApprovalStatusEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the PolicyApprovalStatus IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
PolicyApprovalStatusEnum = '''\
class PolicyApprovalStatus(enum.IntEnum):
"""
The possible policy approval statuses. When there are several approval
statuses available the most severe one will be used. The order of
severity is DISAPPROVED, AREA\_OF\_INTEREST\_ONLY, APPROVED\_LIMITED and
APPROVED.
Attributes:
UNSPECIFIED (int): No value has been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
DISAPPROVED (int): Will not serve.
APPROVED_LIMITED (int): Serves with restrictions.
APPROVED (int): Serves without restrictions.
AREA_OF_INTEREST_ONLY (int): Will not serve in targeted countries, but may serve for users who are
searching for information about the targeted countries.
"""
UNSPECIFIED = 0
UNKNOWN = 1
DISAPPROVED = 2
APPROVED_LIMITED = 3
APPROVED = 4
AREA_OF_INTEREST_ONLY = 5
'''
PolicyApprovalStatusEnum = PolicyApprovalStatusEnum() # For __getattribute__
class PolicyFindingErrorEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the PolicyFindingError IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
PolicyFindingErrorEnum = '''\
class PolicyFindingError(enum.IntEnum):
"""
Enum describing possible policy finding errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
POLICY_FINDING (int): The resource has been disapproved since the policy summary includes
policy topics of type PROHIBITED.
POLICY_TOPIC_NOT_FOUND (int): The given policy topic does not exist.
"""
UNSPECIFIED = 0
UNKNOWN = 1
POLICY_FINDING = 2
POLICY_TOPIC_NOT_FOUND = 3
'''
PolicyFindingErrorEnum = PolicyFindingErrorEnum() # For __getattribute__
class PolicyReviewStatusEnum(_CreateEnumTypeUponFirstAccess):
# Holds the source text of the PolicyReviewStatus IntEnum; the base class
# appears to compile it into a real enum type lazily on first attribute
# access (hence the trailing instantiation) -- confirm against the base class.
PolicyReviewStatusEnum = '''\
class PolicyReviewStatus(enum.IntEnum):
"""
The possible policy review statuses.
Attributes:
UNSPECIFIED (int): No value has been specified.
UNKNOWN (int): The received value is not known in this version.
This is a response-only value.
REVIEW_IN_PROGRESS (int): Currently under review.
REVIEWED (int): Primary review complete. Other reviews may be continuing.
UNDER_APPEAL (int): The resource has been resubmitted for approval or its policy decision has
been appealed.
ELIGIBLE_MAY_SERVE (int): The resource is eligible and may be serving but could still undergo
further review.
"""
UNSPECIFIED = 0
UNKNOWN = 1
REVIEW_IN_PROGRESS = 2
REVIEWED = 3
UNDER_APPEAL = 4
ELIGIBLE_MAY_SERVE = 5
'''
PolicyReviewStatusEnum = PolicyReviewStatusEnum() # For __getattribute__
class PolicyTopicEntryTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Lazily-compiled enum source for PolicyTopicEntryType.
    # NOTE(review): the member values are non-sequential (2, 4, 8, 5, 6, 7) —
    # they mirror the API's wire values and must not be "fixed".
    PolicyTopicEntryTypeEnum = '''\
class PolicyTopicEntryType(enum.IntEnum):
    """
    The possible policy topic entry types.
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): The received value is not known in this version.
      This is a response-only value.
      PROHIBITED (int): The resource will not be served.
      LIMITED (int): The resource will not be served under some circumstances.
      FULLY_LIMITED (int): The resource cannot serve at all because of the current targeting
      criteria.
      DESCRIPTIVE (int): May be of interest, but does not limit how the resource is served.
      BROADENING (int): Could increase coverage beyond normal.
      AREA_OF_INTEREST_ONLY (int): Constrained for all targeted countries, but may serve in other countries
      through area of interest.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PROHIBITED = 2
    LIMITED = 4
    FULLY_LIMITED = 8
    DESCRIPTIVE = 5
    BROADENING = 6
    AREA_OF_INTEREST_ONLY = 7
'''
PolicyTopicEntryTypeEnum = PolicyTopicEntryTypeEnum()  # For __getattribute__
class PolicyTopicEvidenceDestinationMismatchUrlTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PolicyTopicEvidenceDestinationMismatchUrlTypeEnum = '''\
class PolicyTopicEvidenceDestinationMismatchUrlType(enum.IntEnum):
    """
    The possible policy topic evidence destination mismatch url types.
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): The received value is not known in this version.
      This is a response-only value.
      DISPLAY_URL (int): The display url.
      FINAL_URL (int): The final url.
      FINAL_MOBILE_URL (int): The final mobile url.
      TRACKING_URL (int): The tracking url template, with substituted desktop url.
      MOBILE_TRACKING_URL (int): The tracking url template, with substituted mobile url.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DISPLAY_URL = 2
    FINAL_URL = 3
    FINAL_MOBILE_URL = 4
    TRACKING_URL = 5
    MOBILE_TRACKING_URL = 6
'''
PolicyTopicEvidenceDestinationMismatchUrlTypeEnum = PolicyTopicEvidenceDestinationMismatchUrlTypeEnum()  # For __getattribute__
class PolicyTopicEvidenceDestinationNotWorkingDeviceEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PolicyTopicEvidenceDestinationNotWorkingDeviceEnum = '''\
class PolicyTopicEvidenceDestinationNotWorkingDevice(enum.IntEnum):
    """
    The possible policy topic evidence destination not working devices.
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): The received value is not known in this version.
      This is a response-only value.
      DESKTOP (int): Landing page doesn't work on desktop device.
      ANDROID (int): Landing page doesn't work on Android device.
      IOS (int): Landing page doesn't work on iOS device.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    DESKTOP = 2
    ANDROID = 3
    IOS = 4
'''
PolicyTopicEvidenceDestinationNotWorkingDeviceEnum = PolicyTopicEvidenceDestinationNotWorkingDeviceEnum()  # For __getattribute__
class PolicyTopicEvidenceDestinationNotWorkingDnsErrorTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PolicyTopicEvidenceDestinationNotWorkingDnsErrorTypeEnum = '''\
class PolicyTopicEvidenceDestinationNotWorkingDnsErrorType(enum.IntEnum):
    """
    The possible policy topic evidence destination not working DNS error types.
    Attributes:
      UNSPECIFIED (int): No value has been specified.
      UNKNOWN (int): The received value is not known in this version.
      This is a response-only value.
      HOSTNAME_NOT_FOUND (int): Host name not found in DNS when fetching landing page.
      GOOGLE_CRAWLER_DNS_ISSUE (int): Google internal crawler issue when communicating with DNS. This error
      doesn't mean the landing page doesn't work. Google will recrawl the
      landing page.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    HOSTNAME_NOT_FOUND = 2
    GOOGLE_CRAWLER_DNS_ISSUE = 3
'''
PolicyTopicEvidenceDestinationNotWorkingDnsErrorTypeEnum = PolicyTopicEvidenceDestinationNotWorkingDnsErrorTypeEnum()  # For __getattribute__
class PolicyValidationParameterErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PolicyValidationParameterErrorEnum = '''\
class PolicyValidationParameterError(enum.IntEnum):
    """
    Enum describing possible policy validation parameter errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      UNSUPPORTED_AD_TYPE_FOR_IGNORABLE_POLICY_TOPICS (int): Ignorable policy topics are not supported for the ad type.
      UNSUPPORTED_AD_TYPE_FOR_EXEMPT_POLICY_VIOLATION_KEYS (int): Exempt policy violation keys are not supported for the ad type.
      CANNOT_SET_BOTH_IGNORABLE_POLICY_TOPICS_AND_EXEMPT_POLICY_VIOLATION_KEYS (int): Cannot set ignorable policy topics and exempt policy violation keys in
      the same policy violation parameter.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    UNSUPPORTED_AD_TYPE_FOR_IGNORABLE_POLICY_TOPICS = 2
    UNSUPPORTED_AD_TYPE_FOR_EXEMPT_POLICY_VIOLATION_KEYS = 3
    CANNOT_SET_BOTH_IGNORABLE_POLICY_TOPICS_AND_EXEMPT_POLICY_VIOLATION_KEYS = 4
'''
PolicyValidationParameterErrorEnum = PolicyValidationParameterErrorEnum()  # For __getattribute__
class PolicyViolationErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PolicyViolationErrorEnum = '''\
class PolicyViolationError(enum.IntEnum):
    """
    Enum describing possible policy violation errors.
    Attributes:
      UNSPECIFIED (int): Enum unspecified.
      UNKNOWN (int): The received error code is not known in this version.
      POLICY_ERROR (int): A policy was violated. See PolicyViolationDetails for more detail.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    POLICY_ERROR = 2
'''
PolicyViolationErrorEnum = PolicyViolationErrorEnum()  # For __getattribute__
class PositiveGeoTargetTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    # NOTE(review): values jump from 1 to 5 — wire values, not a typo.
    PositiveGeoTargetTypeEnum = '''\
class PositiveGeoTargetType(enum.IntEnum):
    """
    The possible positive geo target types.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      PRESENCE_OR_INTEREST (int): Specifies that an ad is triggered if the user is in,
      or shows interest in, advertiser's targeted locations.
      SEARCH_INTEREST (int): Specifies that an ad is triggered if the user
      searches for advertiser's targeted locations.
      PRESENCE (int): Specifies that an ad is triggered if the user is in
      or regularly in advertiser's targeted locations.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PRESENCE_OR_INTEREST = 5
    SEARCH_INTEREST = 6
    PRESENCE = 7
'''
PositiveGeoTargetTypeEnum = PositiveGeoTargetTypeEnum()  # For __getattribute__
class PreferredContentTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PreferredContentTypeEnum = '''\
class PreferredContentType(enum.IntEnum):
    """
    Enumerates preferred content criterion type.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): The value is unknown in this version.
      YOUTUBE_TOP_CONTENT (int): Represents top content on YouTube.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    YOUTUBE_TOP_CONTENT = 400
'''
PreferredContentTypeEnum = PreferredContentTypeEnum()  # For __getattribute__
class PriceExtensionPriceQualifierEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PriceExtensionPriceQualifierEnum = '''\
class PriceExtensionPriceQualifier(enum.IntEnum):
    """
    Enums of price extension price qualifier.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      FROM (int): 'From' qualifier for the price.
      UP_TO (int): 'Up to' qualifier for the price.
      AVERAGE (int): 'Average' qualifier for the price.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    FROM = 2
    UP_TO = 3
    AVERAGE = 4
'''
PriceExtensionPriceQualifierEnum = PriceExtensionPriceQualifierEnum()  # For __getattribute__
class PriceExtensionPriceUnitEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PriceExtensionPriceUnitEnum = '''\
class PriceExtensionPriceUnit(enum.IntEnum):
    """
    Price extension price unit.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PER_HOUR (int): Per hour.
      PER_DAY (int): Per day.
      PER_WEEK (int): Per week.
      PER_MONTH (int): Per month.
      PER_YEAR (int): Per year.
      PER_NIGHT (int): Per night.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PER_HOUR = 2
    PER_DAY = 3
    PER_WEEK = 4
    PER_MONTH = 5
    PER_YEAR = 6
    PER_NIGHT = 7
'''
PriceExtensionPriceUnitEnum = PriceExtensionPriceUnitEnum()  # For __getattribute__
class PriceExtensionTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PriceExtensionTypeEnum = '''\
class PriceExtensionType(enum.IntEnum):
    """
    Price extension type.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      BRANDS (int): The type for showing a list of brands.
      EVENTS (int): The type for showing a list of events.
      LOCATIONS (int): The type for showing locations relevant to your business.
      NEIGHBORHOODS (int): The type for showing sub-regions or districts within a city or region.
      PRODUCT_CATEGORIES (int): The type for showing a collection of product categories.
      PRODUCT_TIERS (int): The type for showing a collection of related product tiers.
      SERVICES (int): The type for showing a collection of services offered by your business.
      SERVICE_CATEGORIES (int): The type for showing a collection of service categories.
      SERVICE_TIERS (int): The type for showing a collection of related service tiers.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BRANDS = 2
    EVENTS = 3
    LOCATIONS = 4
    NEIGHBORHOODS = 5
    PRODUCT_CATEGORIES = 6
    PRODUCT_TIERS = 7
    SERVICES = 8
    SERVICE_CATEGORIES = 9
    SERVICE_TIERS = 10
'''
PriceExtensionTypeEnum = PriceExtensionTypeEnum()  # For __getattribute__
class PricePlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    # Values are grouped per table row: item N's fields occupy N*100..N*100+5.
    PricePlaceholderFieldEnum = '''\
class PricePlaceholderField(enum.IntEnum):
    """
    Possible values for Price placeholder fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      TYPE (int): Data Type: STRING. The type of your price feed. Must match one of the
      predefined price feed type exactly.
      PRICE_QUALIFIER (int): Data Type: STRING. The qualifier of each price. Must match one of the
      predefined price qualifiers exactly.
      TRACKING_TEMPLATE (int): Data Type: URL. Tracking template for the price feed when using Upgraded
      URLs.
      LANGUAGE (int): Data Type: STRING. Language of the price feed. Must match one of the
      available available locale codes exactly.
      FINAL_URL_SUFFIX (int): Data Type: STRING. Final URL suffix for the price feed when using
      parallel tracking.
      ITEM_1_HEADER (int): Data Type: STRING. The header of item 1 of the table.
      ITEM_1_DESCRIPTION (int): Data Type: STRING. The description of item 1 of the table.
      ITEM_1_PRICE (int): Data Type: MONEY. The price (money with currency) of item 1 of the table,
      e.g., 30 USD. The currency must match one of the available currencies.
      ITEM_1_UNIT (int): Data Type: STRING. The price unit of item 1 of the table. Must match one
      of the predefined price units.
      ITEM_1_FINAL_URLS (int): Data Type: URL\_LIST. The final URLs of item 1 of the table when using
      Upgraded URLs.
      ITEM_1_FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. The final mobile URLs of item 1 of the table when
      using Upgraded URLs.
      ITEM_2_HEADER (int): Data Type: STRING. The header of item 2 of the table.
      ITEM_2_DESCRIPTION (int): Data Type: STRING. The description of item 2 of the table.
      ITEM_2_PRICE (int): Data Type: MONEY. The price (money with currency) of item 2 of the table,
      e.g., 30 USD. The currency must match one of the available currencies.
      ITEM_2_UNIT (int): Data Type: STRING. The price unit of item 2 of the table. Must match one
      of the predefined price units.
      ITEM_2_FINAL_URLS (int): Data Type: URL\_LIST. The final URLs of item 2 of the table when using
      Upgraded URLs.
      ITEM_2_FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. The final mobile URLs of item 2 of the table when
      using Upgraded URLs.
      ITEM_3_HEADER (int): Data Type: STRING. The header of item 3 of the table.
      ITEM_3_DESCRIPTION (int): Data Type: STRING. The description of item 3 of the table.
      ITEM_3_PRICE (int): Data Type: MONEY. The price (money with currency) of item 3 of the table,
      e.g., 30 USD. The currency must match one of the available currencies.
      ITEM_3_UNIT (int): Data Type: STRING. The price unit of item 3 of the table. Must match one
      of the predefined price units.
      ITEM_3_FINAL_URLS (int): Data Type: URL\_LIST. The final URLs of item 3 of the table when using
      Upgraded URLs.
      ITEM_3_FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. The final mobile URLs of item 3 of the table when
      using Upgraded URLs.
      ITEM_4_HEADER (int): Data Type: STRING. The header of item 4 of the table.
      ITEM_4_DESCRIPTION (int): Data Type: STRING. The description of item 4 of the table.
      ITEM_4_PRICE (int): Data Type: MONEY. The price (money with currency) of item 4 of the table,
      e.g., 30 USD. The currency must match one of the available currencies.
      ITEM_4_UNIT (int): Data Type: STRING. The price unit of item 4 of the table. Must match one
      of the predefined price units.
      ITEM_4_FINAL_URLS (int): Data Type: URL\_LIST. The final URLs of item 4 of the table when using
      Upgraded URLs.
      ITEM_4_FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. The final mobile URLs of item 4 of the table when
      using Upgraded URLs.
      ITEM_5_HEADER (int): Data Type: STRING. The header of item 5 of the table.
      ITEM_5_DESCRIPTION (int): Data Type: STRING. The description of item 5 of the table.
      ITEM_5_PRICE (int): Data Type: MONEY. The price (money with currency) of item 5 of the table,
      e.g., 30 USD. The currency must match one of the available currencies.
      ITEM_5_UNIT (int): Data Type: STRING. The price unit of item 5 of the table. Must match one
      of the predefined price units.
      ITEM_5_FINAL_URLS (int): Data Type: URL\_LIST. The final URLs of item 5 of the table when using
      Upgraded URLs.
      ITEM_5_FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. The final mobile URLs of item 5 of the table when
      using Upgraded URLs.
      ITEM_6_HEADER (int): Data Type: STRING. The header of item 6 of the table.
      ITEM_6_DESCRIPTION (int): Data Type: STRING. The description of item 6 of the table.
      ITEM_6_PRICE (int): Data Type: MONEY. The price (money with currency) of item 6 of the table,
      e.g., 30 USD. The currency must match one of the available currencies.
      ITEM_6_UNIT (int): Data Type: STRING. The price unit of item 6 of the table. Must match one
      of the predefined price units.
      ITEM_6_FINAL_URLS (int): Data Type: URL\_LIST. The final URLs of item 6 of the table when using
      Upgraded URLs.
      ITEM_6_FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. The final mobile URLs of item 6 of the table when
      using Upgraded URLs.
      ITEM_7_HEADER (int): Data Type: STRING. The header of item 7 of the table.
      ITEM_7_DESCRIPTION (int): Data Type: STRING. The description of item 7 of the table.
      ITEM_7_PRICE (int): Data Type: MONEY. The price (money with currency) of item 7 of the table,
      e.g., 30 USD. The currency must match one of the available currencies.
      ITEM_7_UNIT (int): Data Type: STRING. The price unit of item 7 of the table. Must match one
      of the predefined price units.
      ITEM_7_FINAL_URLS (int): Data Type: URL\_LIST. The final URLs of item 7 of the table when using
      Upgraded URLs.
      ITEM_7_FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. The final mobile URLs of item 7 of the table when
      using Upgraded URLs.
      ITEM_8_HEADER (int): Data Type: STRING. The header of item 8 of the table.
      ITEM_8_DESCRIPTION (int): Data Type: STRING. The description of item 8 of the table.
      ITEM_8_PRICE (int): Data Type: MONEY. The price (money with currency) of item 8 of the table,
      e.g., 30 USD. The currency must match one of the available currencies.
      ITEM_8_UNIT (int): Data Type: STRING. The price unit of item 8 of the table. Must match one
      of the predefined price units.
      ITEM_8_FINAL_URLS (int): Data Type: URL\_LIST. The final URLs of item 8 of the table when using
      Upgraded URLs.
      ITEM_8_FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. The final mobile URLs of item 8 of the table when
      using Upgraded URLs.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    TYPE = 2
    PRICE_QUALIFIER = 3
    TRACKING_TEMPLATE = 4
    LANGUAGE = 5
    FINAL_URL_SUFFIX = 6
    ITEM_1_HEADER = 100
    ITEM_1_DESCRIPTION = 101
    ITEM_1_PRICE = 102
    ITEM_1_UNIT = 103
    ITEM_1_FINAL_URLS = 104
    ITEM_1_FINAL_MOBILE_URLS = 105
    ITEM_2_HEADER = 200
    ITEM_2_DESCRIPTION = 201
    ITEM_2_PRICE = 202
    ITEM_2_UNIT = 203
    ITEM_2_FINAL_URLS = 204
    ITEM_2_FINAL_MOBILE_URLS = 205
    ITEM_3_HEADER = 300
    ITEM_3_DESCRIPTION = 301
    ITEM_3_PRICE = 302
    ITEM_3_UNIT = 303
    ITEM_3_FINAL_URLS = 304
    ITEM_3_FINAL_MOBILE_URLS = 305
    ITEM_4_HEADER = 400
    ITEM_4_DESCRIPTION = 401
    ITEM_4_PRICE = 402
    ITEM_4_UNIT = 403
    ITEM_4_FINAL_URLS = 404
    ITEM_4_FINAL_MOBILE_URLS = 405
    ITEM_5_HEADER = 500
    ITEM_5_DESCRIPTION = 501
    ITEM_5_PRICE = 502
    ITEM_5_UNIT = 503
    ITEM_5_FINAL_URLS = 504
    ITEM_5_FINAL_MOBILE_URLS = 505
    ITEM_6_HEADER = 600
    ITEM_6_DESCRIPTION = 601
    ITEM_6_PRICE = 602
    ITEM_6_UNIT = 603
    ITEM_6_FINAL_URLS = 604
    ITEM_6_FINAL_MOBILE_URLS = 605
    ITEM_7_HEADER = 700
    ITEM_7_DESCRIPTION = 701
    ITEM_7_PRICE = 702
    ITEM_7_UNIT = 703
    ITEM_7_FINAL_URLS = 704
    ITEM_7_FINAL_MOBILE_URLS = 705
    ITEM_8_HEADER = 800
    ITEM_8_DESCRIPTION = 801
    ITEM_8_PRICE = 802
    ITEM_8_UNIT = 803
    ITEM_8_FINAL_URLS = 804
    ITEM_8_FINAL_MOBILE_URLS = 805
'''
PricePlaceholderFieldEnum = PricePlaceholderFieldEnum()  # For __getattribute__
class ProductBiddingCategoryLevelEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    ProductBiddingCategoryLevelEnum = '''\
class ProductBiddingCategoryLevel(enum.IntEnum):
    """
    Enum describing the level of the product bidding category.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      LEVEL1 (int): Level 1.
      LEVEL2 (int): Level 2.
      LEVEL3 (int): Level 3.
      LEVEL4 (int): Level 4.
      LEVEL5 (int): Level 5.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LEVEL1 = 2
    LEVEL2 = 3
    LEVEL3 = 4
    LEVEL4 = 5
    LEVEL5 = 6
'''
ProductBiddingCategoryLevelEnum = ProductBiddingCategoryLevelEnum()  # For __getattribute__
class ProductBiddingCategoryStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    ProductBiddingCategoryStatusEnum = '''\
class ProductBiddingCategoryStatus(enum.IntEnum):
    """
    Enum describing the status of the product bidding category.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ACTIVE (int): The category is active and can be used for bidding.
      OBSOLETE (int): The category is obsolete. Used only for reporting purposes.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ACTIVE = 2
    OBSOLETE = 3
'''
ProductBiddingCategoryStatusEnum = ProductBiddingCategoryStatusEnum()  # For __getattribute__
class ProductChannelEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    ProductChannelEnum = '''\
class ProductChannel(enum.IntEnum):
    """
    Enum describing the locality of a product offer.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      ONLINE (int): The item is sold online.
      LOCAL (int): The item is sold in local stores.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ONLINE = 2
    LOCAL = 3
'''
ProductChannelEnum = ProductChannelEnum()  # For __getattribute__
class ProductChannelExclusivityEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    ProductChannelExclusivityEnum = '''\
class ProductChannelExclusivity(enum.IntEnum):
    """
    Enum describing the availability of a product offer.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      SINGLE_CHANNEL (int): The item is sold through one channel only, either local stores or online
      as indicated by its ProductChannel.
      MULTI_CHANNEL (int): The item is matched to its online or local stores counterpart, indicating
      it is available for purchase in both ShoppingProductChannels.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    SINGLE_CHANNEL = 2
    MULTI_CHANNEL = 3
'''
ProductChannelExclusivityEnum = ProductChannelExclusivityEnum()  # For __getattribute__
class ProductConditionEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    # NOTE(review): values start at 3 after UNKNOWN=1 — wire values, not a typo.
    ProductConditionEnum = '''\
class ProductCondition(enum.IntEnum):
    """
    Enum describing the condition of a product offer.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      NEW (int): The product condition is new.
      REFURBISHED (int): The product condition is refurbished.
      USED (int): The product condition is used.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NEW = 3
    REFURBISHED = 4
    USED = 5
'''
ProductConditionEnum = ProductConditionEnum()  # For __getattribute__
class ProductCustomAttributeIndexEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    ProductCustomAttributeIndexEnum = '''\
class ProductCustomAttributeIndex(enum.IntEnum):
    """
    The index of the product custom attribute.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      INDEX0 (int): First product custom attribute.
      INDEX1 (int): Second product custom attribute.
      INDEX2 (int): Third product custom attribute.
      INDEX3 (int): Fourth product custom attribute.
      INDEX4 (int): Fifth product custom attribute.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INDEX0 = 7
    INDEX1 = 8
    INDEX2 = 9
    INDEX3 = 10
    INDEX4 = 11
'''
ProductCustomAttributeIndexEnum = ProductCustomAttributeIndexEnum()  # For __getattribute__
class ProductTypeLevelEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    ProductTypeLevelEnum = '''\
class ProductTypeLevel(enum.IntEnum):
    """
    Enum describing the level of the type of a product offer.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      LEVEL1 (int): Level 1.
      LEVEL2 (int): Level 2.
      LEVEL3 (int): Level 3.
      LEVEL4 (int): Level 4.
      LEVEL5 (int): Level 5.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LEVEL1 = 7
    LEVEL2 = 8
    LEVEL3 = 9
    LEVEL4 = 10
    LEVEL5 = 11
'''
ProductTypeLevelEnum = ProductTypeLevelEnum()  # For __getattribute__
class PromotionExtensionDiscountModifierEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PromotionExtensionDiscountModifierEnum = '''\
class PromotionExtensionDiscountModifier(enum.IntEnum):
    """
    A promotion extension discount modifier.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      UP_TO (int): 'Up to'.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    UP_TO = 2
'''
PromotionExtensionDiscountModifierEnum = PromotionExtensionDiscountModifierEnum()  # For __getattribute__
class PromotionExtensionOccasionEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PromotionExtensionOccasionEnum = '''\
class PromotionExtensionOccasion(enum.IntEnum):
    """
    A promotion extension occasion.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      NEW_YEARS (int): New Year's.
      CHINESE_NEW_YEAR (int): Chinese New Year.
      VALENTINES_DAY (int): Valentine's Day.
      EASTER (int): Easter.
      MOTHERS_DAY (int): Mother's Day.
      FATHERS_DAY (int): Father's Day.
      LABOR_DAY (int): Labor Day.
      BACK_TO_SCHOOL (int): Back To School.
      HALLOWEEN (int): Halloween.
      BLACK_FRIDAY (int): Black Friday.
      CYBER_MONDAY (int): Cyber Monday.
      CHRISTMAS (int): Christmas.
      BOXING_DAY (int): Boxing Day.
      INDEPENDENCE_DAY (int): Independence Day in any country.
      NATIONAL_DAY (int): National Day in any country.
      END_OF_SEASON (int): End of any season.
      WINTER_SALE (int): Winter Sale.
      SUMMER_SALE (int): Summer sale.
      FALL_SALE (int): Fall Sale.
      SPRING_SALE (int): Spring Sale.
      RAMADAN (int): Ramadan.
      EID_AL_FITR (int): Eid al-Fitr.
      EID_AL_ADHA (int): Eid al-Adha.
      SINGLES_DAY (int): Singles Day.
      WOMENS_DAY (int): Women's Day.
      HOLI (int): Holi.
      PARENTS_DAY (int): Parent's Day.
      ST_NICHOLAS_DAY (int): St. Nicholas Day.
      CARNIVAL (int): Carnival.
      EPIPHANY (int): Epiphany, also known as Three Kings' Day.
      ROSH_HASHANAH (int): Rosh Hashanah.
      PASSOVER (int): Passover.
      HANUKKAH (int): Hanukkah.
      DIWALI (int): Diwali.
      NAVRATRI (int): Navratri.
      SONGKRAN (int): Available in Thai: Songkran.
      YEAR_END_GIFT (int): Available in Japanese: Year-end Gift.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    NEW_YEARS = 2
    CHINESE_NEW_YEAR = 3
    VALENTINES_DAY = 4
    EASTER = 5
    MOTHERS_DAY = 6
    FATHERS_DAY = 7
    LABOR_DAY = 8
    BACK_TO_SCHOOL = 9
    HALLOWEEN = 10
    BLACK_FRIDAY = 11
    CYBER_MONDAY = 12
    CHRISTMAS = 13
    BOXING_DAY = 14
    INDEPENDENCE_DAY = 15
    NATIONAL_DAY = 16
    END_OF_SEASON = 17
    WINTER_SALE = 18
    SUMMER_SALE = 19
    FALL_SALE = 20
    SPRING_SALE = 21
    RAMADAN = 22
    EID_AL_FITR = 23
    EID_AL_ADHA = 24
    SINGLES_DAY = 25
    WOMENS_DAY = 26
    HOLI = 27
    PARENTS_DAY = 28
    ST_NICHOLAS_DAY = 29
    CARNIVAL = 30
    EPIPHANY = 31
    ROSH_HASHANAH = 32
    PASSOVER = 33
    HANUKKAH = 34
    DIWALI = 35
    NAVRATRI = 36
    SONGKRAN = 37
    YEAR_END_GIFT = 38
'''
PromotionExtensionOccasionEnum = PromotionExtensionOccasionEnum()  # For __getattribute__
class PromotionPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    PromotionPlaceholderFieldEnum = '''\
class PromotionPlaceholderField(enum.IntEnum):
    """
    Possible values for Promotion placeholder fields.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      PROMOTION_TARGET (int): Data Type: STRING. The text that appears on the ad when the extension is
      shown.
      DISCOUNT_MODIFIER (int): Data Type: STRING. Allows you to add "up to" phrase to the promotion,
      in case you have variable promotion rates.
      PERCENT_OFF (int): Data Type: INT64. Takes a value in micros, where 1 million micros
      represents 1%, and is shown as a percentage when rendered.
      MONEY_AMOUNT_OFF (int): Data Type: MONEY. Requires a currency and an amount of money.
      PROMOTION_CODE (int): Data Type: STRING. A string that the user enters to get the discount.
      ORDERS_OVER_AMOUNT (int): Data Type: MONEY. A minimum spend before the user qualifies for the
      promotion.
      PROMOTION_START (int): Data Type: DATE. The start date of the promotion.
      PROMOTION_END (int): Data Type: DATE. The end date of the promotion.
      OCCASION (int): Data Type: STRING. Describes the associated event for the promotion
      using one of the PromotionExtensionOccasion enum values, for example
      NEW\_YEARS.
      FINAL_URLS (int): Data Type: URL\_LIST. Final URLs to be used in the ad when using
      Upgraded URLs.
      FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
      URLs.
      TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
      LANGUAGE (int): Data Type: STRING. A string represented by a language code for the
      promotion.
      FINAL_URL_SUFFIX (int): Data Type: STRING. Final URL suffix for the ad when using parallel
      tracking.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    PROMOTION_TARGET = 2
    DISCOUNT_MODIFIER = 3
    PERCENT_OFF = 4
    MONEY_AMOUNT_OFF = 5
    PROMOTION_CODE = 6
    ORDERS_OVER_AMOUNT = 7
    PROMOTION_START = 8
    PROMOTION_END = 9
    OCCASION = 10
    FINAL_URLS = 11
    FINAL_MOBILE_URLS = 12
    TRACKING_URL = 13
    LANGUAGE = 14
    FINAL_URL_SUFFIX = 15
'''
PromotionPlaceholderFieldEnum = PromotionPlaceholderFieldEnum()  # For __getattribute__
class ProximityRadiusUnitsEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    ProximityRadiusUnitsEnum = '''\
class ProximityRadiusUnits(enum.IntEnum):
    """
    The unit of radius distance in proximity (e.g. MILES)
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      MILES (int): Miles
      KILOMETERS (int): Kilometers
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    MILES = 2
    KILOMETERS = 3
'''
ProximityRadiusUnitsEnum = ProximityRadiusUnitsEnum()  # For __getattribute__
class QualityScoreBucketEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source text, compiled on first attribute access by the base class.
    QualityScoreBucketEnum = '''\
class QualityScoreBucket(enum.IntEnum):
    """
    Enum listing the possible quality score buckets.
    Attributes:
      UNSPECIFIED (int): Not specified.
      UNKNOWN (int): Used for return value only. Represents value unknown in this version.
      BELOW_AVERAGE (int): Quality of the creative is below average.
      AVERAGE (int): Quality of the creative is average.
      ABOVE_AVERAGE (int): Quality of the creative is above average.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BELOW_AVERAGE = 2
    AVERAGE = 3
    ABOVE_AVERAGE = 4
'''
QualityScoreBucketEnum = QualityScoreBucketEnum()  # For __getattribute__
class QueryErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition. NOTE: the numeric values below are
    # non-contiguous because they mirror the upstream API proto.
    QueryErrorEnum = '''\
class QueryError(enum.IntEnum):
    """
    Enum describing possible query errors.
    Attributes:
    UNSPECIFIED (int): Name unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    QUERY_ERROR (int): Returned if all other query error reasons are not applicable.
    BAD_ENUM_CONSTANT (int): A condition used in the query references an invalid enum constant.
    BAD_ESCAPE_SEQUENCE (int): Query contains an invalid escape sequence.
    BAD_FIELD_NAME (int): Field name is invalid.
    BAD_LIMIT_VALUE (int): Limit value is invalid (i.e. not a number)
    BAD_NUMBER (int): Encountered number can not be parsed.
    BAD_OPERATOR (int): Invalid operator encountered.
    BAD_PARAMETER_NAME (int): Parameter unknown or not supported.
    BAD_PARAMETER_VALUE (int): Parameter have invalid value.
    BAD_RESOURCE_TYPE_IN_FROM_CLAUSE (int): Invalid resource type was specified in the FROM clause.
    BAD_SYMBOL (int): Non-ASCII symbol encountered outside of strings.
    BAD_VALUE (int): Value is invalid.
    DATE_RANGE_TOO_WIDE (int): Date filters fail to restrict date to a range smaller than 31 days.
    Applicable if the query is segmented by date.
    DATE_RANGE_TOO_NARROW (int): Filters on date/week/month/quarter have a start date after
    end date.
    EXPECTED_AND (int): Expected AND between values with BETWEEN operator.
    EXPECTED_BY (int): Expecting ORDER BY to have BY.
    EXPECTED_DIMENSION_FIELD_IN_SELECT_CLAUSE (int): There was no dimension field selected.
    EXPECTED_FILTERS_ON_DATE_RANGE (int): Missing filters on date related fields.
    EXPECTED_FROM (int): Missing FROM clause.
    EXPECTED_LIST (int): The operator used in the conditions requires the value to be a list.
    EXPECTED_REFERENCED_FIELD_IN_SELECT_CLAUSE (int): Fields used in WHERE or ORDER BY clauses are missing from the SELECT
    clause.
    EXPECTED_SELECT (int): SELECT is missing at the beginning of query.
    EXPECTED_SINGLE_VALUE (int): A list was passed as a value to a condition whose operator expects a
    single value.
    EXPECTED_VALUE_WITH_BETWEEN_OPERATOR (int): Missing one or both values with BETWEEN operator.
    INVALID_DATE_FORMAT (int): Invalid date format. Expected 'YYYY-MM-DD'.
    INVALID_STRING_VALUE (int): Value passed was not a string when it should have been. I.e., it was a
    number or unquoted literal.
    INVALID_VALUE_WITH_BETWEEN_OPERATOR (int): A String value passed to the BETWEEN operator does not parse as a date.
    INVALID_VALUE_WITH_DURING_OPERATOR (int): The value passed to the DURING operator is not a Date range literal
    INVALID_VALUE_WITH_LIKE_OPERATOR (int): A non-string value was passed to the LIKE operator.
    OPERATOR_FIELD_MISMATCH (int): An operator was provided that is inapplicable to the field being
    filtered.
    PROHIBITED_EMPTY_LIST_IN_CONDITION (int): A Condition was found with an empty list.
    PROHIBITED_ENUM_CONSTANT (int): A condition used in the query references an unsupported enum constant.
    PROHIBITED_FIELD_COMBINATION_IN_SELECT_CLAUSE (int): Fields that are not allowed to be selected together were included in
    the SELECT clause.
    PROHIBITED_FIELD_IN_ORDER_BY_CLAUSE (int): A field that is not orderable was included in the ORDER BY clause.
    PROHIBITED_FIELD_IN_SELECT_CLAUSE (int): A field that is not selectable was included in the SELECT clause.
    PROHIBITED_FIELD_IN_WHERE_CLAUSE (int): A field that is not filterable was included in the WHERE clause.
    PROHIBITED_RESOURCE_TYPE_IN_FROM_CLAUSE (int): Resource type specified in the FROM clause is not supported by this
    service.
    PROHIBITED_RESOURCE_TYPE_IN_SELECT_CLAUSE (int): A field that comes from an incompatible resource was included in the
    SELECT clause.
    PROHIBITED_RESOURCE_TYPE_IN_WHERE_CLAUSE (int): A field that comes from an incompatible resource was included in the
    WHERE clause.
    PROHIBITED_METRIC_IN_SELECT_OR_WHERE_CLAUSE (int): A metric incompatible with the main resource or other selected
    segmenting resources was included in the SELECT or WHERE clause.
    PROHIBITED_SEGMENT_IN_SELECT_OR_WHERE_CLAUSE (int): A segment incompatible with the main resource or other selected
    segmenting resources was included in the SELECT or WHERE clause.
    PROHIBITED_SEGMENT_WITH_METRIC_IN_SELECT_OR_WHERE_CLAUSE (int): A segment in the SELECT clause is incompatible with a metric in the
    SELECT or WHERE clause.
    LIMIT_VALUE_TOO_LOW (int): The value passed to the limit clause is too low.
    PROHIBITED_NEWLINE_IN_STRING (int): Query has a string containing a newline character.
    PROHIBITED_VALUE_COMBINATION_IN_LIST (int): List contains values of different types.
    PROHIBITED_VALUE_COMBINATION_WITH_BETWEEN_OPERATOR (int): The values passed to the BETWEEN operator are not of the same type.
    STRING_NOT_TERMINATED (int): Query contains unterminated string.
    TOO_MANY_SEGMENTS (int): Too many segments are specified in SELECT clause.
    UNEXPECTED_END_OF_QUERY (int): Query is incomplete and cannot be parsed.
    UNEXPECTED_FROM_CLAUSE (int): FROM clause cannot be specified in this query.
    UNRECOGNIZED_FIELD (int): Query contains one or more unrecognized fields.
    UNEXPECTED_INPUT (int): Query has an unexpected extra part.
    REQUESTED_METRICS_FOR_MANAGER (int): Metrics cannot be requested for a manager account. To retrieve metrics,
    issue separate requests against each client account under the manager
    account.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    QUERY_ERROR = 50
    BAD_ENUM_CONSTANT = 18
    BAD_ESCAPE_SEQUENCE = 7
    BAD_FIELD_NAME = 12
    BAD_LIMIT_VALUE = 15
    BAD_NUMBER = 5
    BAD_OPERATOR = 3
    BAD_PARAMETER_NAME = 61
    BAD_PARAMETER_VALUE = 62
    BAD_RESOURCE_TYPE_IN_FROM_CLAUSE = 45
    BAD_SYMBOL = 2
    BAD_VALUE = 4
    DATE_RANGE_TOO_WIDE = 36
    DATE_RANGE_TOO_NARROW = 60
    EXPECTED_AND = 30
    EXPECTED_BY = 14
    EXPECTED_DIMENSION_FIELD_IN_SELECT_CLAUSE = 37
    EXPECTED_FILTERS_ON_DATE_RANGE = 55
    EXPECTED_FROM = 44
    EXPECTED_LIST = 41
    EXPECTED_REFERENCED_FIELD_IN_SELECT_CLAUSE = 16
    EXPECTED_SELECT = 13
    EXPECTED_SINGLE_VALUE = 42
    EXPECTED_VALUE_WITH_BETWEEN_OPERATOR = 29
    INVALID_DATE_FORMAT = 38
    INVALID_STRING_VALUE = 57
    INVALID_VALUE_WITH_BETWEEN_OPERATOR = 26
    INVALID_VALUE_WITH_DURING_OPERATOR = 22
    INVALID_VALUE_WITH_LIKE_OPERATOR = 56
    OPERATOR_FIELD_MISMATCH = 35
    PROHIBITED_EMPTY_LIST_IN_CONDITION = 28
    PROHIBITED_ENUM_CONSTANT = 54
    PROHIBITED_FIELD_COMBINATION_IN_SELECT_CLAUSE = 31
    PROHIBITED_FIELD_IN_ORDER_BY_CLAUSE = 40
    PROHIBITED_FIELD_IN_SELECT_CLAUSE = 23
    PROHIBITED_FIELD_IN_WHERE_CLAUSE = 24
    PROHIBITED_RESOURCE_TYPE_IN_FROM_CLAUSE = 43
    PROHIBITED_RESOURCE_TYPE_IN_SELECT_CLAUSE = 48
    PROHIBITED_RESOURCE_TYPE_IN_WHERE_CLAUSE = 58
    PROHIBITED_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 49
    PROHIBITED_SEGMENT_IN_SELECT_OR_WHERE_CLAUSE = 51
    PROHIBITED_SEGMENT_WITH_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 53
    LIMIT_VALUE_TOO_LOW = 25
    PROHIBITED_NEWLINE_IN_STRING = 8
    PROHIBITED_VALUE_COMBINATION_IN_LIST = 10
    PROHIBITED_VALUE_COMBINATION_WITH_BETWEEN_OPERATOR = 21
    STRING_NOT_TERMINATED = 6
    TOO_MANY_SEGMENTS = 34
    UNEXPECTED_END_OF_QUERY = 9
    UNEXPECTED_FROM_CLAUSE = 47
    UNRECOGNIZED_FIELD = 32
    UNEXPECTED_INPUT = 11
    REQUESTED_METRICS_FOR_MANAGER = 59
'''
QueryErrorEnum = QueryErrorEnum() # For __getattribute__
class QuotaErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    QuotaErrorEnum = '''\
class QuotaError(enum.IntEnum):
    """
    Enum describing possible quota errors.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    RESOURCE_EXHAUSTED (int): Too many requests.
    ACCESS_PROHIBITED (int): Access is prohibited.
    RESOURCE_TEMPORARILY_EXHAUSTED (int): Too many requests in a short amount of time.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    RESOURCE_EXHAUSTED = 2
    ACCESS_PROHIBITED = 3
    RESOURCE_TEMPORARILY_EXHAUSTED = 4
'''
QuotaErrorEnum = QuotaErrorEnum() # For __getattribute__
class RangeErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    RangeErrorEnum = '''\
class RangeError(enum.IntEnum):
    """
    Enum describing possible range errors.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    TOO_LOW (int): Too low.
    TOO_HIGH (int): Too high.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    TOO_LOW = 2
    TOO_HIGH = 3
'''
RangeErrorEnum = RangeErrorEnum() # For __getattribute__
class ReachPlanAdLengthEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    ReachPlanAdLengthEnum = '''\
class ReachPlanAdLength(enum.IntEnum):
    """
    Possible ad length values.
    Attributes:
    UNSPECIFIED (int): Not specified.
    UNKNOWN (int): The value is unknown in this version.
    SIX_SECONDS (int): 6 seconds long ad.
    FIFTEEN_OR_TWENTY_SECONDS (int): 15 or 20 seconds long ad.
    TWENTY_SECONDS_OR_MORE (int): More than 20 seconds long ad.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    SIX_SECONDS = 2
    FIFTEEN_OR_TWENTY_SECONDS = 3
    TWENTY_SECONDS_OR_MORE = 4
'''
ReachPlanAdLengthEnum = ReachPlanAdLengthEnum() # For __getattribute__
class ReachPlanAgeRangeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition. NOTE: the 503xxx values are the
    # upstream API's codes, not local ordinals.
    ReachPlanAgeRangeEnum = '''\
class ReachPlanAgeRange(enum.IntEnum):
    """
    Possible plannable age range values.
    Attributes:
    UNSPECIFIED (int): Not specified.
    UNKNOWN (int): The value is unknown in this version.
    AGE_RANGE_18_24 (int): Between 18 and 24 years old.
    AGE_RANGE_18_34 (int): Between 18 and 34 years old.
    AGE_RANGE_18_44 (int): Between 18 and 44 years old.
    AGE_RANGE_18_49 (int): Between 18 and 49 years old.
    AGE_RANGE_18_54 (int): Between 18 and 54 years old.
    AGE_RANGE_18_64 (int): Between 18 and 64 years old.
    AGE_RANGE_18_65_UP (int): Between 18 and 65+ years old.
    AGE_RANGE_21_34 (int): Between 21 and 34 years old.
    AGE_RANGE_25_34 (int): Between 25 and 34 years old.
    AGE_RANGE_25_44 (int): Between 25 and 44 years old.
    AGE_RANGE_25_49 (int): Between 25 and 49 years old.
    AGE_RANGE_25_54 (int): Between 25 and 54 years old.
    AGE_RANGE_25_64 (int): Between 25 and 64 years old.
    AGE_RANGE_25_65_UP (int): Between 25 and 65+ years old.
    AGE_RANGE_35_44 (int): Between 35 and 44 years old.
    AGE_RANGE_35_49 (int): Between 35 and 49 years old.
    AGE_RANGE_35_54 (int): Between 35 and 54 years old.
    AGE_RANGE_35_64 (int): Between 35 and 64 years old.
    AGE_RANGE_35_65_UP (int): Between 35 and 65+ years old.
    AGE_RANGE_45_54 (int): Between 45 and 54 years old.
    AGE_RANGE_45_64 (int): Between 45 and 64 years old.
    AGE_RANGE_45_65_UP (int): Between 45 and 65+ years old.
    AGE_RANGE_50_65_UP (int): Between 50 and 65+ years old.
    AGE_RANGE_55_64 (int): Between 55 and 64 years old.
    AGE_RANGE_55_65_UP (int): Between 55 and 65+ years old.
    AGE_RANGE_65_UP (int): 65 years old and beyond.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    AGE_RANGE_18_24 = 503001
    AGE_RANGE_18_34 = 2
    AGE_RANGE_18_44 = 3
    AGE_RANGE_18_49 = 4
    AGE_RANGE_18_54 = 5
    AGE_RANGE_18_64 = 6
    AGE_RANGE_18_65_UP = 7
    AGE_RANGE_21_34 = 8
    AGE_RANGE_25_34 = 503002
    AGE_RANGE_25_44 = 9
    AGE_RANGE_25_49 = 10
    AGE_RANGE_25_54 = 11
    AGE_RANGE_25_64 = 12
    AGE_RANGE_25_65_UP = 13
    AGE_RANGE_35_44 = 503003
    AGE_RANGE_35_49 = 14
    AGE_RANGE_35_54 = 15
    AGE_RANGE_35_64 = 16
    AGE_RANGE_35_65_UP = 17
    AGE_RANGE_45_54 = 503004
    AGE_RANGE_45_64 = 18
    AGE_RANGE_45_65_UP = 19
    AGE_RANGE_50_65_UP = 20
    AGE_RANGE_55_64 = 503005
    AGE_RANGE_55_65_UP = 21
    AGE_RANGE_65_UP = 503006
'''
ReachPlanAgeRangeEnum = ReachPlanAgeRangeEnum() # For __getattribute__
class ReachPlanErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    ReachPlanErrorEnum = '''\
class ReachPlanError(enum.IntEnum):
    """
    Enum describing possible errors from ReachPlanService.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
'''
ReachPlanErrorEnum = ReachPlanErrorEnum() # For __getattribute__
class ReachPlanNetworkEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    ReachPlanNetworkEnum = '''\
class ReachPlanNetwork(enum.IntEnum):
    """
    Possible plannable network values.
    Attributes:
    UNSPECIFIED (int): Not specified.
    UNKNOWN (int): Used as a return value only. Represents value unknown in this version.
    YOUTUBE (int): YouTube network.
    GOOGLE_VIDEO_PARTNERS (int): Google Video Partners (GVP) network.
    YOUTUBE_AND_GOOGLE_VIDEO_PARTNERS (int): A combination of the YouTube network and the Google Video Partners
    network.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    YOUTUBE = 2
    GOOGLE_VIDEO_PARTNERS = 3
    YOUTUBE_AND_GOOGLE_VIDEO_PARTNERS = 4
'''
ReachPlanNetworkEnum = ReachPlanNetworkEnum() # For __getattribute__
class RealEstatePlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    RealEstatePlaceholderFieldEnum = '''\
class RealEstatePlaceholderField(enum.IntEnum):
    """
    Possible values for Real Estate placeholder fields.
    Attributes:
    UNSPECIFIED (int): Not specified.
    UNKNOWN (int): Used for return value only. Represents value unknown in this version.
    LISTING_ID (int): Data Type: STRING. Unique ID.
    LISTING_NAME (int): Data Type: STRING. Main headline with listing name to be shown in dynamic
    ad.
    CITY_NAME (int): Data Type: STRING. City name to be shown in dynamic ad.
    DESCRIPTION (int): Data Type: STRING. Description of listing to be shown in dynamic ad.
    ADDRESS (int): Data Type: STRING. Complete listing address, including postal code.
    PRICE (int): Data Type: STRING. Price to be shown in the ad.
    Example: "100.00 USD"
    FORMATTED_PRICE (int): Data Type: STRING. Formatted price to be shown in the ad.
    Example: "Starting at $100.00 USD", "$80 - $100"
    IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad.
    PROPERTY_TYPE (int): Data Type: STRING. Type of property (house, condo, apartment, etc.) used
    to group like items together for recommendation engine.
    LISTING_TYPE (int): Data Type: STRING. Type of listing (resale, rental, foreclosure, etc.)
    used to group like items together for recommendation engine.
    CONTEXTUAL_KEYWORDS (int): Data Type: STRING\_LIST. Keywords used for product retrieval.
    FINAL_URLS (int): Data Type: URL\_LIST. Final URLs to be used in ad when using Upgraded
    URLs; the more specific the better (e.g. the individual URL of a
    specific listing and its location).
    FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
    URLs.
    TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
    ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
    android-app://{package\_id}/{scheme}/{host\_path}. The components are
    defined as follows: package\_id: app ID as specified in Google Play.
    scheme: the scheme to pass to the application. Can be HTTP, or a custom
    scheme. host\_path: identifies the specific content within your
    application.
    SIMILAR_LISTING_IDS (int): Data Type: STRING\_LIST. List of recommended listing IDs to show
    together with this item.
    IOS_APP_LINK (int): Data Type: STRING. iOS app link.
    IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    LISTING_ID = 2
    LISTING_NAME = 3
    CITY_NAME = 4
    DESCRIPTION = 5
    ADDRESS = 6
    PRICE = 7
    FORMATTED_PRICE = 8
    IMAGE_URL = 9
    PROPERTY_TYPE = 10
    LISTING_TYPE = 11
    CONTEXTUAL_KEYWORDS = 12
    FINAL_URLS = 13
    FINAL_MOBILE_URLS = 14
    TRACKING_URL = 15
    ANDROID_APP_LINK = 16
    SIMILAR_LISTING_IDS = 17
    IOS_APP_LINK = 18
    IOS_APP_STORE_ID = 19
'''
RealEstatePlaceholderFieldEnum = RealEstatePlaceholderFieldEnum() # For __getattribute__
class RecommendationErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    RecommendationErrorEnum = '''\
class RecommendationError(enum.IntEnum):
    """
    Enum describing possible errors from applying a recommendation.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    BUDGET_AMOUNT_TOO_SMALL (int): The specified budget amount is too low e.g. lower than minimum currency
    unit or lower than ad group minimum cost-per-click.
    BUDGET_AMOUNT_TOO_LARGE (int): The specified budget amount is too large.
    INVALID_BUDGET_AMOUNT (int): The specified budget amount is not a valid amount. e.g. not a multiple
    of minimum currency unit.
    POLICY_ERROR (int): The specified keyword or ad violates ad policy.
    INVALID_BID_AMOUNT (int): The specified bid amount is not valid. e.g. too many fractional digits,
    or negative amount.
    ADGROUP_KEYWORD_LIMIT (int): The number of keywords in ad group have reached the maximum allowed.
    RECOMMENDATION_ALREADY_APPLIED (int): The recommendation requested to apply has already been applied.
    RECOMMENDATION_INVALIDATED (int): The recommendation requested to apply has been invalidated.
    TOO_MANY_OPERATIONS (int): The number of operations in a single request exceeds the maximum allowed.
    NO_OPERATIONS (int): There are no operations in the request.
    DIFFERENT_TYPES_NOT_SUPPORTED (int): Operations with multiple recommendation types are not supported when
    partial failure mode is not enabled.
    DUPLICATE_RESOURCE_NAME (int): Request contains multiple operations with the same resource\_name.
    RECOMMENDATION_ALREADY_DISMISSED (int): The recommendation requested to dismiss has already been dismissed.
    INVALID_APPLY_REQUEST (int): The recommendation apply request was malformed and invalid.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BUDGET_AMOUNT_TOO_SMALL = 2
    BUDGET_AMOUNT_TOO_LARGE = 3
    INVALID_BUDGET_AMOUNT = 4
    POLICY_ERROR = 5
    INVALID_BID_AMOUNT = 6
    ADGROUP_KEYWORD_LIMIT = 7
    RECOMMENDATION_ALREADY_APPLIED = 8
    RECOMMENDATION_INVALIDATED = 9
    TOO_MANY_OPERATIONS = 10
    NO_OPERATIONS = 11
    DIFFERENT_TYPES_NOT_SUPPORTED = 12
    DUPLICATE_RESOURCE_NAME = 13
    RECOMMENDATION_ALREADY_DISMISSED = 14
    INVALID_APPLY_REQUEST = 15
'''
RecommendationErrorEnum = RecommendationErrorEnum() # For __getattribute__
class RecommendationTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    RecommendationTypeEnum = '''\
class RecommendationType(enum.IntEnum):
    """
    Types of recommendations.
    Attributes:
    UNSPECIFIED (int): Not specified.
    UNKNOWN (int): Used for return value only. Represents value unknown in this version.
    CAMPAIGN_BUDGET (int): Budget recommendation for budget constrained campaigns.
    KEYWORD (int): Keyword recommendation.
    TEXT_AD (int): Recommendation to add a new text ad.
    TARGET_CPA_OPT_IN (int): Recommendation to update a campaign to use a Target CPA bidding strategy.
    MAXIMIZE_CONVERSIONS_OPT_IN (int): Recommendation to update a campaign to use the Maximize Conversions
    bidding strategy.
    ENHANCED_CPC_OPT_IN (int): Recommendation to enable Enhanced Cost Per Click for a campaign.
    SEARCH_PARTNERS_OPT_IN (int): Recommendation to start showing your campaign's ads on Google Search
    Partners Websites.
    MAXIMIZE_CLICKS_OPT_IN (int): Recommendation to update a campaign to use a Maximize Clicks bidding
    strategy.
    OPTIMIZE_AD_ROTATION (int): Recommendation to start using the "Optimize" ad rotation setting for the
    given ad group.
    CALLOUT_EXTENSION (int): Recommendation to add callout extensions to a campaign.
    SITELINK_EXTENSION (int): Recommendation to add sitelink extensions to a campaign.
    CALL_EXTENSION (int): Recommendation to add call extensions to a campaign.
    KEYWORD_MATCH_TYPE (int): Recommendation to change an existing keyword from one match type to a
    broader match type.
    MOVE_UNUSED_BUDGET (int): Recommendation to move unused budget from one budget to a constrained
    budget.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CAMPAIGN_BUDGET = 2
    KEYWORD = 3
    TEXT_AD = 4
    TARGET_CPA_OPT_IN = 5
    MAXIMIZE_CONVERSIONS_OPT_IN = 6
    ENHANCED_CPC_OPT_IN = 7
    SEARCH_PARTNERS_OPT_IN = 8
    MAXIMIZE_CLICKS_OPT_IN = 9
    OPTIMIZE_AD_ROTATION = 10
    CALLOUT_EXTENSION = 11
    SITELINK_EXTENSION = 12
    CALL_EXTENSION = 13
    KEYWORD_MATCH_TYPE = 14
    MOVE_UNUSED_BUDGET = 15
'''
RecommendationTypeEnum = RecommendationTypeEnum() # For __getattribute__
class RegionCodeErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    RegionCodeErrorEnum = '''\
class RegionCodeError(enum.IntEnum):
    """
    Enum describing possible region code errors.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    INVALID_REGION_CODE (int): Invalid region code.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    INVALID_REGION_CODE = 2
'''
RegionCodeErrorEnum = RegionCodeErrorEnum() # For __getattribute__
class RequestErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition. NOTE: the numeric values below are
    # non-contiguous because they mirror the upstream API proto.
    RequestErrorEnum = '''\
class RequestError(enum.IntEnum):
    """
    Enum describing possible request errors.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    RESOURCE_NAME_MISSING (int): Resource name is required for this request.
    RESOURCE_NAME_MALFORMED (int): Resource name provided is malformed.
    BAD_RESOURCE_ID (int): Resource name provided is malformed.
    INVALID_CUSTOMER_ID (int): Customer ID is invalid.
    OPERATION_REQUIRED (int): Mutate operation should have either create, update, or remove specified.
    RESOURCE_NOT_FOUND (int): Requested resource not found.
    INVALID_PAGE_TOKEN (int): Next page token specified in user request is invalid.
    EXPIRED_PAGE_TOKEN (int): Next page token specified in user request has expired.
    INVALID_PAGE_SIZE (int): Page size specified in user request is invalid.
    REQUIRED_FIELD_MISSING (int): Required field is missing.
    IMMUTABLE_FIELD (int): The field cannot be modified because it's immutable. It's also possible
    that the field can be modified using 'create' operation but not 'update'.
    TOO_MANY_MUTATE_OPERATIONS (int): Received too many entries in request.
    CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT (int): Request cannot be executed by a manager account.
    CANNOT_MODIFY_FOREIGN_FIELD (int): Mutate request was attempting to modify a readonly field.
    For instance, Budget fields can be requested for Ad Group,
    but are read-only for adGroups:mutate.
    INVALID_ENUM_VALUE (int): Enum value is not permitted.
    DEVELOPER_TOKEN_PARAMETER_MISSING (int): The developer-token parameter is required for all requests.
    LOGIN_CUSTOMER_ID_PARAMETER_MISSING (int): The login-customer-id parameter is required for this request.
    VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN (int): page\_token is set in the validate only request
    CANNOT_RETURN_SUMMARY_ROW_FOR_REQUEST_WITHOUT_METRICS (int): return\_summary\_row cannot be enabled if request did not select any
    metrics field.
    CANNOT_RETURN_SUMMARY_ROW_FOR_VALIDATE_ONLY_REQUESTS (int): return\_summary\_row should not be enabled for validate only requests.
    INCONSISTENT_RETURN_SUMMARY_ROW_VALUE (int): return\_summary\_row parameter value should be the same between requests
    with page\_token field set and their original request.
    TOTAL_RESULTS_COUNT_NOT_ORIGINALLY_REQUESTED (int): The total results count cannot be returned if it was not requested in the
    original request.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    RESOURCE_NAME_MISSING = 3
    RESOURCE_NAME_MALFORMED = 4
    BAD_RESOURCE_ID = 17
    INVALID_CUSTOMER_ID = 16
    OPERATION_REQUIRED = 5
    RESOURCE_NOT_FOUND = 6
    INVALID_PAGE_TOKEN = 7
    EXPIRED_PAGE_TOKEN = 8
    INVALID_PAGE_SIZE = 22
    REQUIRED_FIELD_MISSING = 9
    IMMUTABLE_FIELD = 11
    TOO_MANY_MUTATE_OPERATIONS = 13
    CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT = 14
    CANNOT_MODIFY_FOREIGN_FIELD = 15
    INVALID_ENUM_VALUE = 18
    DEVELOPER_TOKEN_PARAMETER_MISSING = 19
    LOGIN_CUSTOMER_ID_PARAMETER_MISSING = 20
    VALIDATE_ONLY_REQUEST_HAS_PAGE_TOKEN = 21
    CANNOT_RETURN_SUMMARY_ROW_FOR_REQUEST_WITHOUT_METRICS = 29
    CANNOT_RETURN_SUMMARY_ROW_FOR_VALIDATE_ONLY_REQUESTS = 30
    INCONSISTENT_RETURN_SUMMARY_ROW_VALUE = 31
    TOTAL_RESULTS_COUNT_NOT_ORIGINALLY_REQUESTED = 32
'''
RequestErrorEnum = RequestErrorEnum() # For __getattribute__
class ResourceAccessDeniedErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    ResourceAccessDeniedErrorEnum = '''\
class ResourceAccessDeniedError(enum.IntEnum):
    """
    Enum describing possible resource access denied errors.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    WRITE_ACCESS_DENIED (int): User did not have write access.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    WRITE_ACCESS_DENIED = 3
'''
ResourceAccessDeniedErrorEnum = ResourceAccessDeniedErrorEnum() # For __getattribute__
class ResourceCountLimitExceededErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    ResourceCountLimitExceededErrorEnum = '''\
class ResourceCountLimitExceededError(enum.IntEnum):
    """
    Enum describing possible resource count limit exceeded errors.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    ACCOUNT_LIMIT (int): Indicates that this request would exceed the number of allowed resources
    for the Google Ads account. The exact resource type and limit being
    checked can be inferred from accountLimitType.
    CAMPAIGN_LIMIT (int): Indicates that this request would exceed the number of allowed resources
    in a Campaign. The exact resource type and limit being checked can be
    inferred from accountLimitType, and the numeric id of the
    Campaign involved is given by enclosingId.
    ADGROUP_LIMIT (int): Indicates that this request would exceed the number of allowed resources
    in an ad group. The exact resource type and limit being checked can be
    inferred from accountLimitType, and the numeric id of the
    ad group involved is given by enclosingId.
    AD_GROUP_AD_LIMIT (int): Indicates that this request would exceed the number of allowed resources
    in an ad group ad. The exact resource type and limit being checked can
    be inferred from accountLimitType, and the enclosingId
    contains the ad group id followed by the ad id, separated by a single
    comma (,).
    AD_GROUP_CRITERION_LIMIT (int): Indicates that this request would exceed the number of allowed resources
    in an ad group criterion. The exact resource type and limit being checked
    can be inferred from accountLimitType, and the
    enclosingId contains the ad group id followed by the
    criterion id, separated by a single comma (,).
    SHARED_SET_LIMIT (int): Indicates that this request would exceed the number of allowed resources
    in this shared set. The exact resource type and limit being checked can
    be inferred from accountLimitType, and the numeric id of the
    shared set involved is given by enclosingId.
    MATCHING_FUNCTION_LIMIT (int): Exceeds a limit related to a matching function.
    RESPONSE_ROW_LIMIT_EXCEEDED (int): The response for this request would exceed the maximum number of rows
    that can be returned.
    RESOURCE_LIMIT (int): This request would exceed a limit on the number of allowed resources.
    The details of which type of limit was exceeded will eventually be
    returned in ErrorDetails.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ACCOUNT_LIMIT = 2
    CAMPAIGN_LIMIT = 3
    ADGROUP_LIMIT = 4
    AD_GROUP_AD_LIMIT = 5
    AD_GROUP_CRITERION_LIMIT = 6
    SHARED_SET_LIMIT = 7
    MATCHING_FUNCTION_LIMIT = 8
    RESPONSE_ROW_LIMIT_EXCEEDED = 9
    RESOURCE_LIMIT = 10
'''
ResourceCountLimitExceededErrorEnum = ResourceCountLimitExceededErrorEnum() # For __getattribute__
class SearchEngineResultsPageTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    SearchEngineResultsPageTypeEnum = '''\
class SearchEngineResultsPageType(enum.IntEnum):
    """
    The type of the search engine results page.
    Attributes:
    UNSPECIFIED (int): Not specified.
    UNKNOWN (int): Used for return value only. Represents value unknown in this version.
    ADS_ONLY (int): Only ads were contained in the search engine results page.
    ORGANIC_ONLY (int): Only organic results were contained in the search engine results page.
    ADS_AND_ORGANIC (int): Both ads and organic results were contained in the search engine results
    page.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ADS_ONLY = 2
    ORGANIC_ONLY = 3
    ADS_AND_ORGANIC = 4
'''
SearchEngineResultsPageTypeEnum = SearchEngineResultsPageTypeEnum() # For __getattribute__
class SearchTermMatchTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    SearchTermMatchTypeEnum = '''\
class SearchTermMatchType(enum.IntEnum):
    """
    Possible match types for a keyword triggering an ad, including variants.
    Attributes:
    UNSPECIFIED (int): Not specified.
    UNKNOWN (int): Used for return value only. Represents value unknown in this version.
    BROAD (int): Broad match.
    EXACT (int): Exact match.
    PHRASE (int): Phrase match.
    NEAR_EXACT (int): Exact match (close variant).
    NEAR_PHRASE (int): Phrase match (close variant).
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    BROAD = 2
    EXACT = 3
    PHRASE = 4
    NEAR_EXACT = 5
    NEAR_PHRASE = 6
'''
SearchTermMatchTypeEnum = SearchTermMatchTypeEnum() # For __getattribute__
class SearchTermTargetingStatusEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    SearchTermTargetingStatusEnum = '''\
class SearchTermTargetingStatus(enum.IntEnum):
    """
    Indicates whether the search term is one of your targeted or excluded
    keywords.
    Attributes:
    UNSPECIFIED (int): Not specified.
    UNKNOWN (int): Used for return value only. Represents value unknown in this version.
    ADDED (int): Search term is added to targeted keywords.
    EXCLUDED (int): Search term matches a negative keyword.
    ADDED_EXCLUDED (int): Search term has been both added and excluded.
    NONE (int): Search term is neither targeted nor excluded.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    ADDED = 2
    EXCLUDED = 3
    ADDED_EXCLUDED = 4
    NONE = 5
'''
SearchTermTargetingStatusEnum = SearchTermTargetingStatusEnum() # For __getattribute__
class ServedAssetFieldTypeEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    ServedAssetFieldTypeEnum = '''\
class ServedAssetFieldType(enum.IntEnum):
    """
    The possible asset field types.
    Attributes:
    UNSPECIFIED (int): No value has been specified.
    UNKNOWN (int): The received value is not known in this version.
    This is a response-only value.
    HEADLINE_1 (int): The asset is used in headline 1.
    HEADLINE_2 (int): The asset is used in headline 2.
    HEADLINE_3 (int): The asset is used in headline 3.
    DESCRIPTION_1 (int): The asset is used in description 1.
    DESCRIPTION_2 (int): The asset is used in description 2.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    HEADLINE_1 = 2
    HEADLINE_2 = 3
    HEADLINE_3 = 4
    DESCRIPTION_1 = 5
    DESCRIPTION_2 = 6
'''
ServedAssetFieldTypeEnum = ServedAssetFieldTypeEnum() # For __getattribute__
class SettingErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    SettingErrorEnum = '''\
class SettingError(enum.IntEnum):
    """
    Enum describing possible setting errors.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    SETTING_TYPE_IS_NOT_AVAILABLE (int): The campaign setting is not available for this Google Ads account.
    SETTING_TYPE_IS_NOT_COMPATIBLE_WITH_CAMPAIGN (int): The setting is not compatible with the campaign.
    TARGETING_SETTING_CONTAINS_INVALID_CRITERION_TYPE_GROUP (int): The supplied TargetingSetting contains an invalid CriterionTypeGroup. See
    CriterionTypeGroup documentation for CriterionTypeGroups allowed
    in Campaign or AdGroup TargetingSettings.
    TARGETING_SETTING_DEMOGRAPHIC_CRITERION_TYPE_GROUPS_MUST_BE_SET_TO_TARGET_ALL (int): TargetingSetting must not explicitly set any of the Demographic
    CriterionTypeGroups (AGE\_RANGE, GENDER, PARENT, INCOME\_RANGE) to false
    (it's okay to not set them at all, in which case the system will set
    them to true automatically).
    TARGETING_SETTING_CANNOT_CHANGE_TARGET_ALL_TO_FALSE_FOR_DEMOGRAPHIC_CRITERION_TYPE_GROUP (int): TargetingSetting cannot change any of the Demographic
    CriterionTypeGroups (AGE\_RANGE, GENDER, PARENT, INCOME\_RANGE) from
    true to false.
    DYNAMIC_SEARCH_ADS_SETTING_AT_LEAST_ONE_FEED_ID_MUST_BE_PRESENT (int): At least one feed id should be present.
    DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_INVALID_DOMAIN_NAME (int): The supplied DynamicSearchAdsSetting contains an invalid domain name.
    DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_SUBDOMAIN_NAME (int): The supplied DynamicSearchAdsSetting contains a subdomain name.
    DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_INVALID_LANGUAGE_CODE (int): The supplied DynamicSearchAdsSetting contains an invalid language code.
    TARGET_ALL_IS_NOT_ALLOWED_FOR_PLACEMENT_IN_SEARCH_CAMPAIGN (int): TargetingSettings in search campaigns should not have
    CriterionTypeGroup.PLACEMENT set to targetAll.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    SETTING_TYPE_IS_NOT_AVAILABLE = 3
    SETTING_TYPE_IS_NOT_COMPATIBLE_WITH_CAMPAIGN = 4
    TARGETING_SETTING_CONTAINS_INVALID_CRITERION_TYPE_GROUP = 5
    TARGETING_SETTING_DEMOGRAPHIC_CRITERION_TYPE_GROUPS_MUST_BE_SET_TO_TARGET_ALL = 6
    TARGETING_SETTING_CANNOT_CHANGE_TARGET_ALL_TO_FALSE_FOR_DEMOGRAPHIC_CRITERION_TYPE_GROUP = 7
    DYNAMIC_SEARCH_ADS_SETTING_AT_LEAST_ONE_FEED_ID_MUST_BE_PRESENT = 8
    DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_INVALID_DOMAIN_NAME = 9
    DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_SUBDOMAIN_NAME = 10
    DYNAMIC_SEARCH_ADS_SETTING_CONTAINS_INVALID_LANGUAGE_CODE = 11
    TARGET_ALL_IS_NOT_ALLOWED_FOR_PLACEMENT_IN_SEARCH_CAMPAIGN = 12
'''
SettingErrorEnum = SettingErrorEnum() # For __getattribute__
class SharedCriterionErrorEnum(_CreateEnumTypeUponFirstAccess):
    # Enum source kept as a string; _CreateEnumTypeUponFirstAccess presumably
    # builds the real enum type lazily on first attribute access -- confirm
    # against the helper's definition.
    SharedCriterionErrorEnum = '''\
class SharedCriterionError(enum.IntEnum):
    """
    Enum describing possible shared criterion errors.
    Attributes:
    UNSPECIFIED (int): Enum unspecified.
    UNKNOWN (int): The received error code is not known in this version.
    CRITERION_TYPE_NOT_ALLOWED_FOR_SHARED_SET_TYPE (int): The criterion is not appropriate for the shared set type.
    """
    UNSPECIFIED = 0
    UNKNOWN = 1
    CRITERION_TYPE_NOT_ALLOWED_FOR_SHARED_SET_TYPE = 2
'''
SharedCriterionErrorEnum = SharedCriterionErrorEnum() # For __getattribute__
# Lazy holder for SharedSetError: stores the enum source text; the enum type is
# presumably created on first attribute access by the base class.
class SharedSetErrorEnum(_CreateEnumTypeUponFirstAccess):
SharedSetErrorEnum = '''\
class SharedSetError(enum.IntEnum):
"""
Enum describing possible shared set errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
CUSTOMER_CANNOT_CREATE_SHARED_SET_OF_THIS_TYPE (int): The customer cannot create this type of shared set.
DUPLICATE_NAME (int): A shared set with this name already exists.
SHARED_SET_REMOVED (int): Removed shared sets cannot be mutated.
SHARED_SET_IN_USE (int): The shared set cannot be removed because it is in use.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CUSTOMER_CANNOT_CREATE_SHARED_SET_OF_THIS_TYPE = 2
DUPLICATE_NAME = 3
SHARED_SET_REMOVED = 4
SHARED_SET_IN_USE = 5
'''
SharedSetErrorEnum = SharedSetErrorEnum()  # For __getattribute__
# Lazy holder for SharedSetStatus: enum source text, materialized on demand.
class SharedSetStatusEnum(_CreateEnumTypeUponFirstAccess):
SharedSetStatusEnum = '''\
class SharedSetStatus(enum.IntEnum):
"""
Enum listing the possible shared set statuses.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
ENABLED (int): The shared set is enabled.
REMOVED (int): The shared set is removed and can no longer be used.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
REMOVED = 3
'''
SharedSetStatusEnum = SharedSetStatusEnum()  # For __getattribute__
# Lazy holder for SharedSetType: enum source text, materialized on demand.
class SharedSetTypeEnum(_CreateEnumTypeUponFirstAccess):
SharedSetTypeEnum = '''\
class SharedSetType(enum.IntEnum):
"""
Enum listing the possible shared set types.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
NEGATIVE_KEYWORDS (int): A set of keywords that can be excluded from targeting.
NEGATIVE_PLACEMENTS (int): A set of placements that can be excluded from targeting.
"""
UNSPECIFIED = 0
UNKNOWN = 1
NEGATIVE_KEYWORDS = 2
NEGATIVE_PLACEMENTS = 3
'''
SharedSetTypeEnum = SharedSetTypeEnum()  # For __getattribute__
# Lazy holder for SimulationModificationMethod: enum source text, materialized on demand.
class SimulationModificationMethodEnum(_CreateEnumTypeUponFirstAccess):
SimulationModificationMethodEnum = '''\
class SimulationModificationMethod(enum.IntEnum):
"""
Enum describing the method by which a simulation modifies a field.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
UNIFORM (int): The values in a simulation were applied to all children of a given
resource uniformly. Overrides on child resources were not respected.
DEFAULT (int): The values in a simulation were applied to the given resource.
Overrides on child resources were respected, and traffic estimates
do not include these resources.
"""
UNSPECIFIED = 0
UNKNOWN = 1
UNIFORM = 2
DEFAULT = 3
'''
SimulationModificationMethodEnum = SimulationModificationMethodEnum()  # For __getattribute__
# Lazy holder for SimulationType: enum source text, materialized on demand.
class SimulationTypeEnum(_CreateEnumTypeUponFirstAccess):
SimulationTypeEnum = '''\
class SimulationType(enum.IntEnum):
"""
Enum describing the field a simulation modifies.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
CPC_BID (int): The simulation is for a cpc bid.
CPV_BID (int): The simulation is for a cpv bid.
TARGET_CPA (int): The simulation is for a cpa target.
BID_MODIFIER (int): The simulation is for a bid modifier.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CPC_BID = 2
CPV_BID = 3
TARGET_CPA = 4
BID_MODIFIER = 5
'''
SimulationTypeEnum = SimulationTypeEnum()  # For __getattribute__
# Lazy holder for SitelinkPlaceholderField: enum source text, materialized on demand.
class SitelinkPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
SitelinkPlaceholderFieldEnum = '''\
class SitelinkPlaceholderField(enum.IntEnum):
"""
Possible values for Sitelink placeholder fields.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
TEXT (int): Data Type: STRING. The link text for your sitelink.
LINE_1 (int): Data Type: STRING. First line of the sitelink description.
LINE_2 (int): Data Type: STRING. Second line of the sitelink description.
FINAL_URLS (int): Data Type: URL\_LIST. Final URLs for the sitelink when using Upgraded
URLs.
FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final Mobile URLs for the sitelink when using
Upgraded URLs.
TRACKING_URL (int): Data Type: URL. Tracking template for the sitelink when using Upgraded
URLs.
FINAL_URL_SUFFIX (int): Data Type: STRING. Final URL suffix for sitelink when using parallel
tracking.
"""
UNSPECIFIED = 0
UNKNOWN = 1
TEXT = 2
LINE_1 = 3
LINE_2 = 4
FINAL_URLS = 5
FINAL_MOBILE_URLS = 6
TRACKING_URL = 7
FINAL_URL_SUFFIX = 8
'''
SitelinkPlaceholderFieldEnum = SitelinkPlaceholderFieldEnum()  # For __getattribute__
# Lazy holder for SizeLimitError: enum source text, materialized on demand.
class SizeLimitErrorEnum(_CreateEnumTypeUponFirstAccess):
SizeLimitErrorEnum = '''\
class SizeLimitError(enum.IntEnum):
"""
Enum describing possible size limit errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
REQUEST_SIZE_LIMIT_EXCEEDED (int): The number of entries in the request exceeds the system limit.
RESPONSE_SIZE_LIMIT_EXCEEDED (int): The number of entries in the response exceeds the system limit.
"""
UNSPECIFIED = 0
UNKNOWN = 1
REQUEST_SIZE_LIMIT_EXCEEDED = 2
RESPONSE_SIZE_LIMIT_EXCEEDED = 3
'''
SizeLimitErrorEnum = SizeLimitErrorEnum()  # For __getattribute__
# Lazy holder for Slot (ad position): enum source text, materialized on demand.
class SlotEnum(_CreateEnumTypeUponFirstAccess):
SlotEnum = '''\
class Slot(enum.IntEnum):
"""
Enumerates possible positions of the Ad.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): The value is unknown in this version.
SEARCH_SIDE (int): Google search: Side.
SEARCH_TOP (int): Google search: Top.
SEARCH_OTHER (int): Google search: Other.
CONTENT (int): Google Display Network.
SEARCH_PARTNER_TOP (int): Search partners: Top.
SEARCH_PARTNER_OTHER (int): Search partners: Other.
MIXED (int): Cross-network.
"""
UNSPECIFIED = 0
UNKNOWN = 1
SEARCH_SIDE = 2
SEARCH_TOP = 3
SEARCH_OTHER = 4
CONTENT = 5
SEARCH_PARTNER_TOP = 6
SEARCH_PARTNER_OTHER = 7
MIXED = 8
'''
SlotEnum = SlotEnum()  # For __getattribute__
# Lazy holder for SpendingLimitType: enum source text, materialized on demand.
class SpendingLimitTypeEnum(_CreateEnumTypeUponFirstAccess):
SpendingLimitTypeEnum = '''\
class SpendingLimitType(enum.IntEnum):
"""
The possible spending limit types used by certain resources as an
alternative to absolute money values in micros.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
INFINITE (int): Infinite, indicates unlimited spending power.
"""
UNSPECIFIED = 0
UNKNOWN = 1
INFINITE = 2
'''
SpendingLimitTypeEnum = SpendingLimitTypeEnum()  # For __getattribute__
# Lazy holder for StringFormatError: enum source text, materialized on demand.
class StringFormatErrorEnum(_CreateEnumTypeUponFirstAccess):
StringFormatErrorEnum = '''\
class StringFormatError(enum.IntEnum):
"""
Enum describing possible string format errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
ILLEGAL_CHARS (int): The input string value contains disallowed characters.
INVALID_FORMAT (int): The input string value is invalid for the associated field.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ILLEGAL_CHARS = 2
INVALID_FORMAT = 3
'''
StringFormatErrorEnum = StringFormatErrorEnum()  # For __getattribute__
# Lazy holder for StringLengthError: enum source text, materialized on demand.
# NOTE: the non-sequential value EMPTY = 4 mirrors the server-side proto and
# must not be "fixed".
class StringLengthErrorEnum(_CreateEnumTypeUponFirstAccess):
StringLengthErrorEnum = '''\
class StringLengthError(enum.IntEnum):
"""
Enum describing possible string length errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
EMPTY (int): The specified field should have a least one non-whitespace character in
it.
TOO_SHORT (int): Too short.
TOO_LONG (int): Too long.
"""
UNSPECIFIED = 0
UNKNOWN = 1
EMPTY = 4
TOO_SHORT = 2
TOO_LONG = 3
'''
StringLengthErrorEnum = StringLengthErrorEnum()  # For __getattribute__
# Lazy holder for StructuredSnippetPlaceholderField: enum source text, materialized on demand.
class StructuredSnippetPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
StructuredSnippetPlaceholderFieldEnum = '''\
class StructuredSnippetPlaceholderField(enum.IntEnum):
"""
Possible values for Structured Snippet placeholder fields.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
HEADER (int): Data Type: STRING. The category of snippet of your products/services.
Must match exactly one of the predefined structured snippets headers.
For a list, visit
https://developers.google.com/adwords/api/docs/appendix/structured-snippet-headers
SNIPPETS (int): Data Type: STRING\_LIST. Text values that describe your
products/services. All text must be family safe. Special or non-ASCII
characters are not permitted. A snippet can be at most 25 characters.
"""
UNSPECIFIED = 0
UNKNOWN = 1
HEADER = 2
SNIPPETS = 3
'''
StructuredSnippetPlaceholderFieldEnum = StructuredSnippetPlaceholderFieldEnum()  # For __getattribute__
# Lazy holder for SummaryRowSetting: enum source text, materialized on demand.
class SummaryRowSettingEnum(_CreateEnumTypeUponFirstAccess):
SummaryRowSettingEnum = '''\
class SummaryRowSetting(enum.IntEnum):
"""
Enum describing return summary row settings.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Represent unknown values of return summary row.
NO_SUMMARY_ROW (int): Do not return summary row.
SUMMARY_ROW_WITH_RESULTS (int): Return summary row along with results. The summary row will be returned
in the last batch alone (last batch will contain no results).
SUMMARY_ROW_ONLY (int): Return summary row only and return no results.
"""
UNSPECIFIED = 0
UNKNOWN = 1
NO_SUMMARY_ROW = 2
SUMMARY_ROW_WITH_RESULTS = 3
SUMMARY_ROW_ONLY = 4
'''
SummaryRowSettingEnum = SummaryRowSettingEnum()  # For __getattribute__
# Lazy holder for SystemManagedResourceSource: enum source text, materialized on demand.
class SystemManagedResourceSourceEnum(_CreateEnumTypeUponFirstAccess):
SystemManagedResourceSourceEnum = '''\
class SystemManagedResourceSource(enum.IntEnum):
"""
Enum listing the possible system managed entity sources.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
AD_VARIATIONS (int): Generated ad variations experiment ad.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AD_VARIATIONS = 2
'''
SystemManagedResourceSourceEnum = SystemManagedResourceSourceEnum()  # For __getattribute__
# Lazy holder for TargetCpaOptInRecommendationGoal: enum source text, materialized on demand.
class TargetCpaOptInRecommendationGoalEnum(_CreateEnumTypeUponFirstAccess):
TargetCpaOptInRecommendationGoalEnum = '''\
class TargetCpaOptInRecommendationGoal(enum.IntEnum):
"""
Goal of TargetCpaOptIn recommendation.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
SAME_COST (int): Recommendation to set Target CPA to maintain the same cost.
SAME_CONVERSIONS (int): Recommendation to set Target CPA to maintain the same conversions.
SAME_CPA (int): Recommendation to set Target CPA to maintain the same CPA.
CLOSEST_CPA (int): Recommendation to set Target CPA to a value that is as close as possible
to, yet lower than, the actual CPA (computed for past 28 days).
"""
UNSPECIFIED = 0
UNKNOWN = 1
SAME_COST = 2
SAME_CONVERSIONS = 3
SAME_CPA = 4
CLOSEST_CPA = 5
'''
TargetCpaOptInRecommendationGoalEnum = TargetCpaOptInRecommendationGoalEnum()  # For __getattribute__
# Lazy holder for TargetImpressionShareLocation: enum source text, materialized on demand.
class TargetImpressionShareLocationEnum(_CreateEnumTypeUponFirstAccess):
TargetImpressionShareLocationEnum = '''\
class TargetImpressionShareLocation(enum.IntEnum):
"""
Enum describing possible goals.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
ANYWHERE_ON_PAGE (int): Any location on the web page.
TOP_OF_PAGE (int): Top box of ads.
ABSOLUTE_TOP_OF_PAGE (int): Top slot in the top box of ads.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ANYWHERE_ON_PAGE = 2
TOP_OF_PAGE = 3
ABSOLUTE_TOP_OF_PAGE = 4
'''
TargetImpressionShareLocationEnum = TargetImpressionShareLocationEnum()  # For __getattribute__
# Lazy holder for the TargetRestrictionOperation operator enum. NOTE: unlike
# its siblings, the inner enum here is named `Operator` rather than matching
# the holder's name — this mirrors the generated source and is intentional.
class TargetRestrictionOperation(_CreateEnumTypeUponFirstAccess):
TargetRestrictionOperation = '''\
class Operator(enum.IntEnum):
"""
The operator.
Attributes:
UNSPECIFIED (int): Unspecified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
ADD (int): Add the restriction to the existing restrictions.
REMOVE (int): Remove the restriction from the existing restrictions.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ADD = 2
REMOVE = 3
'''
TargetRestrictionOperation = TargetRestrictionOperation()  # For __getattribute__
# Lazy holder for TargetingDimension: enum source text, materialized on demand.
class TargetingDimensionEnum(_CreateEnumTypeUponFirstAccess):
TargetingDimensionEnum = '''\
class TargetingDimension(enum.IntEnum):
"""
Enum describing possible targeting dimensions.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
KEYWORD (int): Keyword criteria, e.g. 'mars cruise'. KEYWORD may be used as a custom bid
dimension. Keywords are always a targeting dimension, so may not be set
as a target "ALL" dimension with TargetRestriction.
AUDIENCE (int): Audience criteria, which include user list, user interest, custom
affinity, and custom in market.
TOPIC (int): Topic criteria for targeting categories of content, e.g.
'category::Animals>Pets' Used for Display and Video targeting.
GENDER (int): Criteria for targeting gender.
AGE_RANGE (int): Criteria for targeting age ranges.
PLACEMENT (int): Placement criteria, which include websites like 'www.flowers4sale.com',
as well as mobile applications, mobile app categories, YouTube videos,
and YouTube channels.
PARENTAL_STATUS (int): Criteria for parental status targeting.
INCOME_RANGE (int): Criteria for income range targeting.
"""
UNSPECIFIED = 0
UNKNOWN = 1
KEYWORD = 2
AUDIENCE = 3
TOPIC = 4
GENDER = 5
AGE_RANGE = 6
PLACEMENT = 7
PARENTAL_STATUS = 8
INCOME_RANGE = 9
'''
TargetingDimensionEnum = TargetingDimensionEnum()  # For __getattribute__
# Lazy holder for TimeType: enum source text, materialized on demand.
class TimeTypeEnum(_CreateEnumTypeUponFirstAccess):
TimeTypeEnum = '''\
class TimeType(enum.IntEnum):
"""
The possible time types used by certain resources as an alternative to
absolute timestamps.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
NOW (int): As soon as possible.
FOREVER (int): An infinite point in the future.
"""
UNSPECIFIED = 0
UNKNOWN = 1
NOW = 2
FOREVER = 3
'''
TimeTypeEnum = TimeTypeEnum()  # For __getattribute__
# Lazy holder for TimeZoneError: enum source text, materialized on demand.
# NOTE: INVALID_TIME_ZONE = 5 (not 2) mirrors the server-side proto numbering;
# the summary line "currency code errors" is upstream generated text — do not edit.
class TimeZoneErrorEnum(_CreateEnumTypeUponFirstAccess):
TimeZoneErrorEnum = '''\
class TimeZoneError(enum.IntEnum):
"""
Enum describing possible currency code errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
INVALID_TIME_ZONE (int): Time zone is not valid.
"""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_TIME_ZONE = 5
'''
TimeZoneErrorEnum = TimeZoneErrorEnum()  # For __getattribute__
# Lazy holder for TrackingCodePageFormat: enum source text, materialized on demand.
class TrackingCodePageFormatEnum(_CreateEnumTypeUponFirstAccess):
TrackingCodePageFormatEnum = '''\
class TrackingCodePageFormat(enum.IntEnum):
"""
The format of the web page where the tracking tag and snippet will be
installed.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
HTML (int): Standard HTML page format.
AMP (int): Google AMP page format.
"""
UNSPECIFIED = 0
UNKNOWN = 1
HTML = 2
AMP = 3
'''
TrackingCodePageFormatEnum = TrackingCodePageFormatEnum()  # For __getattribute__
# Lazy holder for TrackingCodeType: enum source text, materialized on demand.
class TrackingCodeTypeEnum(_CreateEnumTypeUponFirstAccess):
TrackingCodeTypeEnum = '''\
class TrackingCodeType(enum.IntEnum):
"""
The type of the generated tag snippets for tracking conversions.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
WEBPAGE (int): The snippet that is fired as a result of a website page loading.
WEBPAGE_ONCLICK (int): The snippet contains a JavaScript function which fires the tag. This
function is typically called from an onClick handler added to a link or
button element on the page.
CLICK_TO_CALL (int): For embedding on a mobile webpage. The snippet contains a JavaScript
function which fires the tag.
WEBSITE_CALL (int): The snippet that is used to replace the phone number on your website with
a Google forwarding number for call tracking purposes.
"""
UNSPECIFIED = 0
UNKNOWN = 1
WEBPAGE = 2
WEBPAGE_ONCLICK = 3
CLICK_TO_CALL = 4
WEBSITE_CALL = 5
'''
TrackingCodeTypeEnum = TrackingCodeTypeEnum()  # For __getattribute__
# Lazy holder for TravelPlaceholderField: enum source text, materialized on demand.
class TravelPlaceholderFieldEnum(_CreateEnumTypeUponFirstAccess):
TravelPlaceholderFieldEnum = '''\
class TravelPlaceholderField(enum.IntEnum):
"""
Possible values for Travel placeholder fields.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
DESTINATION_ID (int): Data Type: STRING. Required. Destination id. Example: PAR, LON.
For feed items that only have destination id, destination id must be a
unique key. For feed items that have both destination id and origin id,
then the combination must be a unique key.
ORIGIN_ID (int): Data Type: STRING. Origin id. Example: PAR, LON. Combination of
DESTINATION\_ID and ORIGIN\_ID must be unique per offer.
TITLE (int): Data Type: STRING. Required. Main headline with name to be shown in
dynamic ad.
DESTINATION_NAME (int): Data Type: STRING. The destination name. Shorter names are recommended.
ORIGIN_NAME (int): Data Type: STRING. Origin name. Shorter names are recommended.
PRICE (int): Data Type: STRING. Price to be shown in the ad. Highly recommended for
dynamic ads.
Example: "100.00 USD"
FORMATTED_PRICE (int): Data Type: STRING. Formatted price to be shown in the ad.
Example: "Starting at $100.00 USD", "$80 - $100"
SALE_PRICE (int): Data Type: STRING. Sale price to be shown in the ad.
Example: "80.00 USD"
FORMATTED_SALE_PRICE (int): Data Type: STRING. Formatted sale price to be shown in the ad.
Example: "On sale for $80.00", "$60 - $80"
IMAGE_URL (int): Data Type: URL. Image to be displayed in the ad.
CATEGORY (int): Data Type: STRING. Category of travel offer used to group like items
together for recommendation engine.
CONTEXTUAL_KEYWORDS (int): Data Type: STRING\_LIST. Keywords used for product retrieval.
DESTINATION_ADDRESS (int): Data Type: STRING. Address of travel offer, including postal code.
FINAL_URL (int): Data Type: URL\_LIST. Required. Final URLs to be used in ad, when using
Upgraded URLs; the more specific the better (e.g. the individual URL of
a specific travel offer and its location).
FINAL_MOBILE_URLS (int): Data Type: URL\_LIST. Final mobile URLs for the ad when using Upgraded
URLs.
TRACKING_URL (int): Data Type: URL. Tracking template for the ad when using Upgraded URLs.
ANDROID_APP_LINK (int): Data Type: STRING. Android app link. Must be formatted as:
android-app://{package\_id}/{scheme}/{host\_path}. The components are
defined as follows: package\_id: app ID as specified in Google Play.
scheme: the scheme to pass to the application. Can be HTTP, or a custom
scheme. host\_path: identifies the specific content within your
application.
SIMILAR_DESTINATION_IDS (int): Data Type: STRING\_LIST. List of recommended destination IDs to show
together with this item.
IOS_APP_LINK (int): Data Type: STRING. iOS app link.
IOS_APP_STORE_ID (int): Data Type: INT64. iOS app store ID.
"""
UNSPECIFIED = 0
UNKNOWN = 1
DESTINATION_ID = 2
ORIGIN_ID = 3
TITLE = 4
DESTINATION_NAME = 5
ORIGIN_NAME = 6
PRICE = 7
FORMATTED_PRICE = 8
SALE_PRICE = 9
FORMATTED_SALE_PRICE = 10
IMAGE_URL = 11
CATEGORY = 12
CONTEXTUAL_KEYWORDS = 13
DESTINATION_ADDRESS = 14
FINAL_URL = 15
FINAL_MOBILE_URLS = 16
TRACKING_URL = 17
ANDROID_APP_LINK = 18
SIMILAR_DESTINATION_IDS = 19
IOS_APP_LINK = 20
IOS_APP_STORE_ID = 21
'''
TravelPlaceholderFieldEnum = TravelPlaceholderFieldEnum()  # For __getattribute__
# Lazy holder for UrlFieldError: enum source text, materialized on demand.
# NOTE: several values are non-sequential (e.g. INVALID_CHARACTERS_... = 39,
# INVALID_PROTOCOL = 52) — the numbering mirrors the server-side proto.
class UrlFieldErrorEnum(_CreateEnumTypeUponFirstAccess):
UrlFieldErrorEnum = '''\
class UrlFieldError(enum.IntEnum):
"""
Enum describing possible url field errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
INVALID_TRACKING_URL_TEMPLATE (int): The tracking url template is invalid.
INVALID_TAG_IN_TRACKING_URL_TEMPLATE (int): The tracking url template contains invalid tag.
MISSING_TRACKING_URL_TEMPLATE_TAG (int): The tracking url template must contain at least one tag (e.g. {lpurl}),
This applies only to tracking url template associated with website ads or
product ads.
MISSING_PROTOCOL_IN_TRACKING_URL_TEMPLATE (int): The tracking url template must start with a valid protocol (or lpurl
tag).
INVALID_PROTOCOL_IN_TRACKING_URL_TEMPLATE (int): The tracking url template starts with an invalid protocol.
MALFORMED_TRACKING_URL_TEMPLATE (int): The tracking url template contains illegal characters.
MISSING_HOST_IN_TRACKING_URL_TEMPLATE (int): The tracking url template must contain a host name (or lpurl tag).
INVALID_TLD_IN_TRACKING_URL_TEMPLATE (int): The tracking url template has an invalid or missing top level domain
extension.
REDUNDANT_NESTED_TRACKING_URL_TEMPLATE_TAG (int): The tracking url template contains nested occurrences of the same
conditional tag (i.e. {ifmobile:{ifmobile:x}}).
INVALID_FINAL_URL (int): The final url is invalid.
INVALID_TAG_IN_FINAL_URL (int): The final url contains invalid tag.
REDUNDANT_NESTED_FINAL_URL_TAG (int): The final url contains nested occurrences of the same conditional tag
(i.e. {ifmobile:{ifmobile:x}}).
MISSING_PROTOCOL_IN_FINAL_URL (int): The final url must start with a valid protocol.
INVALID_PROTOCOL_IN_FINAL_URL (int): The final url starts with an invalid protocol.
MALFORMED_FINAL_URL (int): The final url contains illegal characters.
MISSING_HOST_IN_FINAL_URL (int): The final url must contain a host name.
INVALID_TLD_IN_FINAL_URL (int): The tracking url template has an invalid or missing top level domain
extension.
INVALID_FINAL_MOBILE_URL (int): The final mobile url is invalid.
INVALID_TAG_IN_FINAL_MOBILE_URL (int): The final mobile url contains invalid tag.
REDUNDANT_NESTED_FINAL_MOBILE_URL_TAG (int): The final mobile url contains nested occurrences of the same conditional
tag (i.e. {ifmobile:{ifmobile:x}}).
MISSING_PROTOCOL_IN_FINAL_MOBILE_URL (int): The final mobile url must start with a valid protocol.
INVALID_PROTOCOL_IN_FINAL_MOBILE_URL (int): The final mobile url starts with an invalid protocol.
MALFORMED_FINAL_MOBILE_URL (int): The final mobile url contains illegal characters.
MISSING_HOST_IN_FINAL_MOBILE_URL (int): The final mobile url must contain a host name.
INVALID_TLD_IN_FINAL_MOBILE_URL (int): The tracking url template has an invalid or missing top level domain
extension.
INVALID_FINAL_APP_URL (int): The final app url is invalid.
INVALID_TAG_IN_FINAL_APP_URL (int): The final app url contains invalid tag.
REDUNDANT_NESTED_FINAL_APP_URL_TAG (int): The final app url contains nested occurrences of the same conditional tag
(i.e. {ifmobile:{ifmobile:x}}).
MULTIPLE_APP_URLS_FOR_OSTYPE (int): More than one app url found for the same OS type.
INVALID_OSTYPE (int): The OS type given for an app url is not valid.
INVALID_PROTOCOL_FOR_APP_URL (int): The protocol given for an app url is not valid. (E.g. "android-app://")
INVALID_PACKAGE_ID_FOR_APP_URL (int): The package id (app id) given for an app url is not valid.
URL_CUSTOM_PARAMETERS_COUNT_EXCEEDS_LIMIT (int): The number of url custom parameters for an resource exceeds the maximum
limit allowed.
INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_KEY (int): An invalid character appears in the parameter key.
INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_VALUE (int): An invalid character appears in the parameter value.
INVALID_TAG_IN_URL_CUSTOM_PARAMETER_VALUE (int): The url custom parameter value fails url tag validation.
REDUNDANT_NESTED_URL_CUSTOM_PARAMETER_TAG (int): The custom parameter contains nested occurrences of the same conditional
tag (i.e. {ifmobile:{ifmobile:x}}).
MISSING_PROTOCOL (int): The protocol (http:// or https://) is missing.
INVALID_PROTOCOL (int): Unsupported protocol in URL. Only http and https are supported.
INVALID_URL (int): The url is invalid.
DESTINATION_URL_DEPRECATED (int): Destination Url is deprecated.
INVALID_TAG_IN_URL (int): The url contains invalid tag.
MISSING_URL_TAG (int): The url must contain at least one tag (e.g. {lpurl}), This applies only
to urls associated with website ads or product ads.
DUPLICATE_URL_ID (int): Duplicate url id.
INVALID_URL_ID (int): Invalid url id.
FINAL_URL_SUFFIX_MALFORMED (int): The final url suffix cannot begin with '?' or '&' characters and must be
a valid query string.
INVALID_TAG_IN_FINAL_URL_SUFFIX (int): The final url suffix cannot contain {lpurl} related or {ignore} tags.
INVALID_TOP_LEVEL_DOMAIN (int): The top level domain is invalid, e.g, not a public top level domain
listed in publicsuffix.org.
MALFORMED_TOP_LEVEL_DOMAIN (int): Malformed top level domain in URL.
MALFORMED_URL (int): Malformed URL.
MISSING_HOST (int): No host found in URL.
NULL_CUSTOM_PARAMETER_VALUE (int): Custom parameter value cannot be null.
"""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_TRACKING_URL_TEMPLATE = 2
INVALID_TAG_IN_TRACKING_URL_TEMPLATE = 3
MISSING_TRACKING_URL_TEMPLATE_TAG = 4
MISSING_PROTOCOL_IN_TRACKING_URL_TEMPLATE = 5
INVALID_PROTOCOL_IN_TRACKING_URL_TEMPLATE = 6
MALFORMED_TRACKING_URL_TEMPLATE = 7
MISSING_HOST_IN_TRACKING_URL_TEMPLATE = 8
INVALID_TLD_IN_TRACKING_URL_TEMPLATE = 9
REDUNDANT_NESTED_TRACKING_URL_TEMPLATE_TAG = 10
INVALID_FINAL_URL = 11
INVALID_TAG_IN_FINAL_URL = 12
REDUNDANT_NESTED_FINAL_URL_TAG = 13
MISSING_PROTOCOL_IN_FINAL_URL = 14
INVALID_PROTOCOL_IN_FINAL_URL = 15
MALFORMED_FINAL_URL = 16
MISSING_HOST_IN_FINAL_URL = 17
INVALID_TLD_IN_FINAL_URL = 18
INVALID_FINAL_MOBILE_URL = 19
INVALID_TAG_IN_FINAL_MOBILE_URL = 20
REDUNDANT_NESTED_FINAL_MOBILE_URL_TAG = 21
MISSING_PROTOCOL_IN_FINAL_MOBILE_URL = 22
INVALID_PROTOCOL_IN_FINAL_MOBILE_URL = 23
MALFORMED_FINAL_MOBILE_URL = 24
MISSING_HOST_IN_FINAL_MOBILE_URL = 25
INVALID_TLD_IN_FINAL_MOBILE_URL = 26
INVALID_FINAL_APP_URL = 27
INVALID_TAG_IN_FINAL_APP_URL = 28
REDUNDANT_NESTED_FINAL_APP_URL_TAG = 29
MULTIPLE_APP_URLS_FOR_OSTYPE = 30
INVALID_OSTYPE = 31
INVALID_PROTOCOL_FOR_APP_URL = 32
INVALID_PACKAGE_ID_FOR_APP_URL = 33
URL_CUSTOM_PARAMETERS_COUNT_EXCEEDS_LIMIT = 34
INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_KEY = 39
INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_VALUE = 40
INVALID_TAG_IN_URL_CUSTOM_PARAMETER_VALUE = 41
REDUNDANT_NESTED_URL_CUSTOM_PARAMETER_TAG = 42
MISSING_PROTOCOL = 43
INVALID_PROTOCOL = 52
INVALID_URL = 44
DESTINATION_URL_DEPRECATED = 45
INVALID_TAG_IN_URL = 46
MISSING_URL_TAG = 47
DUPLICATE_URL_ID = 48
INVALID_URL_ID = 49
FINAL_URL_SUFFIX_MALFORMED = 50
INVALID_TAG_IN_FINAL_URL_SUFFIX = 51
INVALID_TOP_LEVEL_DOMAIN = 53
MALFORMED_TOP_LEVEL_DOMAIN = 54
MALFORMED_URL = 55
MISSING_HOST = 56
NULL_CUSTOM_PARAMETER_VALUE = 57
'''
UrlFieldErrorEnum = UrlFieldErrorEnum()  # For __getattribute__
# Lazy holder for UserInterestTaxonomyType: enum source text, materialized on demand.
class UserInterestTaxonomyTypeEnum(_CreateEnumTypeUponFirstAccess):
UserInterestTaxonomyTypeEnum = '''\
class UserInterestTaxonomyType(enum.IntEnum):
"""
Enum containing the possible UserInterestTaxonomyTypes.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
AFFINITY (int): The affinity for this user interest.
IN_MARKET (int): The market for this user interest.
MOBILE_APP_INSTALL_USER (int): Users known to have installed applications in the specified categories.
VERTICAL_GEO (int): The geographical location of the interest-based vertical.
NEW_SMART_PHONE_USER (int): User interest criteria for new smart phone users.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AFFINITY = 2
IN_MARKET = 3
MOBILE_APP_INSTALL_USER = 4
VERTICAL_GEO = 5
NEW_SMART_PHONE_USER = 6
'''
UserInterestTaxonomyTypeEnum = UserInterestTaxonomyTypeEnum()  # For __getattribute__
# Lazy holder for UserListAccessStatus: enum source text, materialized on demand.
class UserListAccessStatusEnum(_CreateEnumTypeUponFirstAccess):
UserListAccessStatusEnum = '''\
class UserListAccessStatus(enum.IntEnum):
"""
Enum containing possible user list access statuses.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
ENABLED (int): The access is enabled.
DISABLED (int): The access is disabled.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
DISABLED = 3
'''
UserListAccessStatusEnum = UserListAccessStatusEnum()  # For __getattribute__
# Lazy holder for UserListClosingReason: enum source text, materialized on demand.
class UserListClosingReasonEnum(_CreateEnumTypeUponFirstAccess):
UserListClosingReasonEnum = '''\
class UserListClosingReason(enum.IntEnum):
"""
Enum describing possible user list closing reasons.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
UNUSED (int): The userlist was closed because of not being used for over one year.
"""
UNSPECIFIED = 0
UNKNOWN = 1
UNUSED = 2
'''
UserListClosingReasonEnum = UserListClosingReasonEnum()  # For __getattribute__
# Lazy holder for UserListCombinedRuleOperator: enum source text, materialized on demand.
class UserListCombinedRuleOperatorEnum(_CreateEnumTypeUponFirstAccess):
UserListCombinedRuleOperatorEnum = '''\
class UserListCombinedRuleOperator(enum.IntEnum):
"""
Enum describing possible user list combined rule operators.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
AND (int): A AND B.
AND_NOT (int): A AND NOT B.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AND = 2
AND_NOT = 3
'''
UserListCombinedRuleOperatorEnum = UserListCombinedRuleOperatorEnum()  # For __getattribute__
# Lazy holder for UserListCrmDataSourceType: enum source text, materialized on demand.
class UserListCrmDataSourceTypeEnum(_CreateEnumTypeUponFirstAccess):
UserListCrmDataSourceTypeEnum = '''\
class UserListCrmDataSourceType(enum.IntEnum):
"""
Enum describing possible user list crm data source type.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
FIRST_PARTY (int): The uploaded data is first-party data.
THIRD_PARTY_CREDIT_BUREAU (int): The uploaded data is from a third-party credit bureau.
THIRD_PARTY_VOTER_FILE (int): The uploaded data is from a third-party voter file.
"""
UNSPECIFIED = 0
UNKNOWN = 1
FIRST_PARTY = 2
THIRD_PARTY_CREDIT_BUREAU = 3
THIRD_PARTY_VOTER_FILE = 4
'''
UserListCrmDataSourceTypeEnum = UserListCrmDataSourceTypeEnum()  # For __getattribute__
# Lazy holder for UserListDateRuleItemOperator: enum source text, materialized on demand.
class UserListDateRuleItemOperatorEnum(_CreateEnumTypeUponFirstAccess):
UserListDateRuleItemOperatorEnum = '''\
class UserListDateRuleItemOperator(enum.IntEnum):
"""
Enum describing possible user list date rule item operators.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
EQUALS (int): Equals.
NOT_EQUALS (int): Not Equals.
BEFORE (int): Before.
AFTER (int): After.
"""
UNSPECIFIED = 0
UNKNOWN = 1
EQUALS = 2
NOT_EQUALS = 3
BEFORE = 4
AFTER = 5
'''
UserListDateRuleItemOperatorEnum = UserListDateRuleItemOperatorEnum()  # For __getattribute__
class UserListErrorEnum(_CreateEnumTypeUponFirstAccess):
UserListErrorEnum = '''\
class UserListError(enum.IntEnum):
"""
Enum describing possible user list errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
EXTERNAL_REMARKETING_USER_LIST_MUTATE_NOT_SUPPORTED (int): Creating and updating external remarketing user lists is not supported.
CONCRETE_TYPE_REQUIRED (int): Concrete type of user list is required.
CONVERSION_TYPE_ID_REQUIRED (int): Creating/updating user list conversion types requires specifying the
conversion type Id.
DUPLICATE_CONVERSION_TYPES (int): Remarketing user list cannot have duplicate conversion types.
INVALID_CONVERSION_TYPE (int): Conversion type is invalid/unknown.
INVALID_DESCRIPTION (int): User list description is empty or invalid.
INVALID_NAME (int): User list name is empty or invalid.
INVALID_TYPE (int): Type of the UserList does not match.
CAN_NOT_ADD_LOGICAL_LIST_AS_LOGICAL_LIST_OPERAND (int): Embedded logical user lists are not allowed.
INVALID_USER_LIST_LOGICAL_RULE_OPERAND (int): User list rule operand is invalid.
NAME_ALREADY_USED (int): Name is already being used for another user list for the account.
NEW_CONVERSION_TYPE_NAME_REQUIRED (int): Name is required when creating a new conversion type.
CONVERSION_TYPE_NAME_ALREADY_USED (int): The given conversion type name has been used.
OWNERSHIP_REQUIRED_FOR_SET (int): Only an owner account may edit a user list.
USER_LIST_MUTATE_NOT_SUPPORTED (int): Creating user list without setting type in oneof user\_list field, or
creating/updating read-only user list types is not allowed.
INVALID_RULE (int): Rule is invalid.
INVALID_DATE_RANGE (int): The specified date range is empty.
CAN_NOT_MUTATE_SENSITIVE_USERLIST (int): A UserList which is privacy sensitive or legal rejected cannot be mutated
by external users.
MAX_NUM_RULEBASED_USERLISTS (int): Maximum number of rulebased user lists a customer can have.
CANNOT_MODIFY_BILLABLE_RECORD_COUNT (int): BasicUserList's billable record field cannot be modified once it is set.
APP_ID_NOT_SET (int): crm\_based\_user\_list.app\_id field must be set when upload\_key\_type
is MOBILE\_ADVERTISING\_ID.
USERLIST_NAME_IS_RESERVED_FOR_SYSTEM_LIST (int): Name of the user list is reserved for system generated lists and cannot
be used.
ADVERTISER_NOT_WHITELISTED_FOR_USING_UPLOADED_DATA (int): Advertiser needs to be whitelisted to use remarketing lists created from
advertiser uploaded data (e.g., Customer Match lists).
RULE_TYPE_IS_NOT_SUPPORTED (int): The provided rule\_type is not supported for the user list.
CAN_NOT_ADD_A_SIMILAR_USERLIST_AS_LOGICAL_LIST_OPERAND (int): Similar user list cannot be used as a logical user list operand.
CAN_NOT_MIX_CRM_BASED_IN_LOGICAL_LIST_WITH_OTHER_LISTS (int): Logical user list should not have a mix of CRM based user list and other
types of lists in its rules.
"""
UNSPECIFIED = 0
UNKNOWN = 1
EXTERNAL_REMARKETING_USER_LIST_MUTATE_NOT_SUPPORTED = 2
CONCRETE_TYPE_REQUIRED = 3
CONVERSION_TYPE_ID_REQUIRED = 4
DUPLICATE_CONVERSION_TYPES = 5
INVALID_CONVERSION_TYPE = 6
INVALID_DESCRIPTION = 7
INVALID_NAME = 8
INVALID_TYPE = 9
CAN_NOT_ADD_LOGICAL_LIST_AS_LOGICAL_LIST_OPERAND = 10
INVALID_USER_LIST_LOGICAL_RULE_OPERAND = 11
NAME_ALREADY_USED = 12
NEW_CONVERSION_TYPE_NAME_REQUIRED = 13
CONVERSION_TYPE_NAME_ALREADY_USED = 14
OWNERSHIP_REQUIRED_FOR_SET = 15
USER_LIST_MUTATE_NOT_SUPPORTED = 16
INVALID_RULE = 17
INVALID_DATE_RANGE = 27
CAN_NOT_MUTATE_SENSITIVE_USERLIST = 28
MAX_NUM_RULEBASED_USERLISTS = 29
CANNOT_MODIFY_BILLABLE_RECORD_COUNT = 30
APP_ID_NOT_SET = 31
USERLIST_NAME_IS_RESERVED_FOR_SYSTEM_LIST = 32
ADVERTISER_NOT_WHITELISTED_FOR_USING_UPLOADED_DATA = 33
RULE_TYPE_IS_NOT_SUPPORTED = 34
CAN_NOT_ADD_A_SIMILAR_USERLIST_AS_LOGICAL_LIST_OPERAND = 35
CAN_NOT_MIX_CRM_BASED_IN_LOGICAL_LIST_WITH_OTHER_LISTS = 36
'''
UserListErrorEnum = UserListErrorEnum() # For __getattribute__
class UserListLogicalRuleOperatorEnum(_CreateEnumTypeUponFirstAccess):
UserListLogicalRuleOperatorEnum = '''\
class UserListLogicalRuleOperator(enum.IntEnum):
"""
Enum describing possible user list logical rule operators.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
ALL (int): And - all of the operands.
ANY (int): Or - at least one of the operands.
NONE (int): Not - none of the operands.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ALL = 2
ANY = 3
NONE = 4
'''
UserListLogicalRuleOperatorEnum = UserListLogicalRuleOperatorEnum() # For __getattribute__
class UserListMembershipStatusEnum(_CreateEnumTypeUponFirstAccess):
UserListMembershipStatusEnum = '''\
class UserListMembershipStatus(enum.IntEnum):
"""
Enum containing possible user list membership statuses.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
OPEN (int): Open status - List is accruing members and can be targeted to.
CLOSED (int): Closed status - No new members being added. Cannot be used for targeting.
"""
UNSPECIFIED = 0
UNKNOWN = 1
OPEN = 2
CLOSED = 3
'''
UserListMembershipStatusEnum = UserListMembershipStatusEnum() # For __getattribute__
class UserListNumberRuleItemOperatorEnum(_CreateEnumTypeUponFirstAccess):
UserListNumberRuleItemOperatorEnum = '''\
class UserListNumberRuleItemOperator(enum.IntEnum):
"""
Enum describing possible user list number rule item operators.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
GREATER_THAN (int): Greater than.
GREATER_THAN_OR_EQUAL (int): Greater than or equal.
EQUALS (int): Equals.
NOT_EQUALS (int): Not equals.
LESS_THAN (int): Less than.
LESS_THAN_OR_EQUAL (int): Less than or equal.
"""
UNSPECIFIED = 0
UNKNOWN = 1
GREATER_THAN = 2
GREATER_THAN_OR_EQUAL = 3
EQUALS = 4
NOT_EQUALS = 5
LESS_THAN = 6
LESS_THAN_OR_EQUAL = 7
'''
UserListNumberRuleItemOperatorEnum = UserListNumberRuleItemOperatorEnum() # For __getattribute__
class UserListPrepopulationStatusEnum(_CreateEnumTypeUponFirstAccess):
UserListPrepopulationStatusEnum = '''\
class UserListPrepopulationStatus(enum.IntEnum):
"""
Enum describing possible user list prepopulation status.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
        REQUESTED (int): Prepopulation is being requested.
FINISHED (int): Prepopulation is finished.
FAILED (int): Prepopulation failed.
"""
UNSPECIFIED = 0
UNKNOWN = 1
REQUESTED = 2
FINISHED = 3
FAILED = 4
'''
UserListPrepopulationStatusEnum = UserListPrepopulationStatusEnum() # For __getattribute__
class UserListRuleTypeEnum(_CreateEnumTypeUponFirstAccess):
UserListRuleTypeEnum = '''\
class UserListRuleType(enum.IntEnum):
"""
Enum describing possible user list rule types.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
AND_OF_ORS (int): Conjunctive normal form.
OR_OF_ANDS (int): Disjunctive normal form.
"""
UNSPECIFIED = 0
UNKNOWN = 1
AND_OF_ORS = 2
OR_OF_ANDS = 3
'''
UserListRuleTypeEnum = UserListRuleTypeEnum() # For __getattribute__
class UserListSizeRangeEnum(_CreateEnumTypeUponFirstAccess):
UserListSizeRangeEnum = '''\
class UserListSizeRange(enum.IntEnum):
"""
Enum containing possible user list size ranges.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
LESS_THAN_FIVE_HUNDRED (int): User list has less than 500 users.
LESS_THAN_ONE_THOUSAND (int): User list has number of users in range of 500 to 1000.
ONE_THOUSAND_TO_TEN_THOUSAND (int): User list has number of users in range of 1000 to 10000.
TEN_THOUSAND_TO_FIFTY_THOUSAND (int): User list has number of users in range of 10000 to 50000.
FIFTY_THOUSAND_TO_ONE_HUNDRED_THOUSAND (int): User list has number of users in range of 50000 to 100000.
ONE_HUNDRED_THOUSAND_TO_THREE_HUNDRED_THOUSAND (int): User list has number of users in range of 100000 to 300000.
THREE_HUNDRED_THOUSAND_TO_FIVE_HUNDRED_THOUSAND (int): User list has number of users in range of 300000 to 500000.
FIVE_HUNDRED_THOUSAND_TO_ONE_MILLION (int): User list has number of users in range of 500000 to 1 million.
ONE_MILLION_TO_TWO_MILLION (int): User list has number of users in range of 1 to 2 millions.
TWO_MILLION_TO_THREE_MILLION (int): User list has number of users in range of 2 to 3 millions.
THREE_MILLION_TO_FIVE_MILLION (int): User list has number of users in range of 3 to 5 millions.
FIVE_MILLION_TO_TEN_MILLION (int): User list has number of users in range of 5 to 10 millions.
TEN_MILLION_TO_TWENTY_MILLION (int): User list has number of users in range of 10 to 20 millions.
TWENTY_MILLION_TO_THIRTY_MILLION (int): User list has number of users in range of 20 to 30 millions.
THIRTY_MILLION_TO_FIFTY_MILLION (int): User list has number of users in range of 30 to 50 millions.
OVER_FIFTY_MILLION (int): User list has over 50 million users.
"""
UNSPECIFIED = 0
UNKNOWN = 1
LESS_THAN_FIVE_HUNDRED = 2
LESS_THAN_ONE_THOUSAND = 3
ONE_THOUSAND_TO_TEN_THOUSAND = 4
TEN_THOUSAND_TO_FIFTY_THOUSAND = 5
FIFTY_THOUSAND_TO_ONE_HUNDRED_THOUSAND = 6
ONE_HUNDRED_THOUSAND_TO_THREE_HUNDRED_THOUSAND = 7
THREE_HUNDRED_THOUSAND_TO_FIVE_HUNDRED_THOUSAND = 8
FIVE_HUNDRED_THOUSAND_TO_ONE_MILLION = 9
ONE_MILLION_TO_TWO_MILLION = 10
TWO_MILLION_TO_THREE_MILLION = 11
THREE_MILLION_TO_FIVE_MILLION = 12
FIVE_MILLION_TO_TEN_MILLION = 13
TEN_MILLION_TO_TWENTY_MILLION = 14
TWENTY_MILLION_TO_THIRTY_MILLION = 15
THIRTY_MILLION_TO_FIFTY_MILLION = 16
OVER_FIFTY_MILLION = 17
'''
UserListSizeRangeEnum = UserListSizeRangeEnum() # For __getattribute__
class UserListStringRuleItemOperatorEnum(_CreateEnumTypeUponFirstAccess):
UserListStringRuleItemOperatorEnum = '''\
class UserListStringRuleItemOperator(enum.IntEnum):
"""
Enum describing possible user list string rule item operators.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
CONTAINS (int): Contains.
EQUALS (int): Equals.
STARTS_WITH (int): Starts with.
ENDS_WITH (int): Ends with.
NOT_EQUALS (int): Not equals.
NOT_CONTAINS (int): Not contains.
NOT_STARTS_WITH (int): Not starts with.
NOT_ENDS_WITH (int): Not ends with.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CONTAINS = 2
EQUALS = 3
STARTS_WITH = 4
ENDS_WITH = 5
NOT_EQUALS = 6
NOT_CONTAINS = 7
NOT_STARTS_WITH = 8
NOT_ENDS_WITH = 9
'''
UserListStringRuleItemOperatorEnum = UserListStringRuleItemOperatorEnum() # For __getattribute__
class UserListTypeEnum(_CreateEnumTypeUponFirstAccess):
UserListTypeEnum = '''\
class UserListType(enum.IntEnum):
"""
Enum containing possible user list types.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
REMARKETING (int): UserList represented as a collection of conversion types.
LOGICAL (int): UserList represented as a combination of other user lists/interests.
EXTERNAL_REMARKETING (int): UserList created in the Google Ad Manager platform.
RULE_BASED (int): UserList associated with a rule.
SIMILAR (int): UserList with users similar to users of another UserList.
CRM_BASED (int): UserList of first-party CRM data provided by advertiser in the form of
emails or other formats.
"""
UNSPECIFIED = 0
UNKNOWN = 1
REMARKETING = 2
LOGICAL = 3
EXTERNAL_REMARKETING = 4
RULE_BASED = 5
SIMILAR = 6
CRM_BASED = 7
'''
UserListTypeEnum = UserListTypeEnum() # For __getattribute__
class VanityPharmaDisplayUrlModeEnum(_CreateEnumTypeUponFirstAccess):
VanityPharmaDisplayUrlModeEnum = '''\
class VanityPharmaDisplayUrlMode(enum.IntEnum):
"""
Enum describing possible display modes for vanity pharma URLs.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
MANUFACTURER_WEBSITE_URL (int): Replace vanity pharma URL with manufacturer website url.
WEBSITE_DESCRIPTION (int): Replace vanity pharma URL with description of the website.
"""
UNSPECIFIED = 0
UNKNOWN = 1
MANUFACTURER_WEBSITE_URL = 2
WEBSITE_DESCRIPTION = 3
'''
VanityPharmaDisplayUrlModeEnum = VanityPharmaDisplayUrlModeEnum() # For __getattribute__
class VanityPharmaTextEnum(_CreateEnumTypeUponFirstAccess):
VanityPharmaTextEnum = '''\
class VanityPharmaText(enum.IntEnum):
"""
Enum describing possible text.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
PRESCRIPTION_TREATMENT_WEBSITE_EN (int): Prescription treatment website with website content in English.
PRESCRIPTION_TREATMENT_WEBSITE_ES (int): Prescription treatment website with website content in Spanish
(Sitio de tratamientos con receta).
PRESCRIPTION_DEVICE_WEBSITE_EN (int): Prescription device website with website content in English.
PRESCRIPTION_DEVICE_WEBSITE_ES (int): Prescription device website with website content in Spanish (Sitio de
dispositivos con receta).
MEDICAL_DEVICE_WEBSITE_EN (int): Medical device website with website content in English.
MEDICAL_DEVICE_WEBSITE_ES (int): Medical device website with website content in Spanish (Sitio de
dispositivos médicos).
PREVENTATIVE_TREATMENT_WEBSITE_EN (int): Preventative treatment website with website content in English.
PREVENTATIVE_TREATMENT_WEBSITE_ES (int): Preventative treatment website with website content in Spanish (Sitio de
tratamientos preventivos).
PRESCRIPTION_CONTRACEPTION_WEBSITE_EN (int): Prescription contraception website with website content in English.
PRESCRIPTION_CONTRACEPTION_WEBSITE_ES (int): Prescription contraception website with website content in Spanish (Sitio
de anticonceptivos con receta).
PRESCRIPTION_VACCINE_WEBSITE_EN (int): Prescription vaccine website with website content in English.
PRESCRIPTION_VACCINE_WEBSITE_ES (int): Prescription vaccine website with website content in Spanish (Sitio de
vacunas con receta).
"""
UNSPECIFIED = 0
UNKNOWN = 1
PRESCRIPTION_TREATMENT_WEBSITE_EN = 2
PRESCRIPTION_TREATMENT_WEBSITE_ES = 3
PRESCRIPTION_DEVICE_WEBSITE_EN = 4
PRESCRIPTION_DEVICE_WEBSITE_ES = 5
MEDICAL_DEVICE_WEBSITE_EN = 6
MEDICAL_DEVICE_WEBSITE_ES = 7
PREVENTATIVE_TREATMENT_WEBSITE_EN = 8
PREVENTATIVE_TREATMENT_WEBSITE_ES = 9
PRESCRIPTION_CONTRACEPTION_WEBSITE_EN = 10
PRESCRIPTION_CONTRACEPTION_WEBSITE_ES = 11
PRESCRIPTION_VACCINE_WEBSITE_EN = 12
PRESCRIPTION_VACCINE_WEBSITE_ES = 13
'''
VanityPharmaTextEnum = VanityPharmaTextEnum() # For __getattribute__
class WebpageConditionOperandEnum(_CreateEnumTypeUponFirstAccess):
WebpageConditionOperandEnum = '''\
class WebpageConditionOperand(enum.IntEnum):
"""
The webpage condition operand in webpage criterion.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
URL (int): Operand denoting a webpage URL targeting condition.
CATEGORY (int): Operand denoting a webpage category targeting condition.
PAGE_TITLE (int): Operand denoting a webpage title targeting condition.
PAGE_CONTENT (int): Operand denoting a webpage content targeting condition.
CUSTOM_LABEL (int): Operand denoting a webpage custom label targeting condition.
"""
UNSPECIFIED = 0
UNKNOWN = 1
URL = 2
CATEGORY = 3
PAGE_TITLE = 4
PAGE_CONTENT = 5
CUSTOM_LABEL = 6
'''
WebpageConditionOperandEnum = WebpageConditionOperandEnum() # For __getattribute__
class WebpageConditionOperatorEnum(_CreateEnumTypeUponFirstAccess):
WebpageConditionOperatorEnum = '''\
class WebpageConditionOperator(enum.IntEnum):
"""
The webpage condition operator in webpage criterion.
Attributes:
UNSPECIFIED (int): Not specified.
UNKNOWN (int): Used for return value only. Represents value unknown in this version.
EQUALS (int): The argument web condition is equal to the compared web condition.
CONTAINS (int): The argument web condition is part of the compared web condition.
"""
UNSPECIFIED = 0
UNKNOWN = 1
EQUALS = 2
CONTAINS = 3
'''
WebpageConditionOperatorEnum = WebpageConditionOperatorEnum() # For __getattribute__
class YoutubeVideoRegistrationErrorEnum(_CreateEnumTypeUponFirstAccess):
YoutubeVideoRegistrationErrorEnum = '''\
class YoutubeVideoRegistrationError(enum.IntEnum):
"""
Enum describing YouTube video registration errors.
Attributes:
UNSPECIFIED (int): Enum unspecified.
UNKNOWN (int): The received error code is not known in this version.
VIDEO_NOT_FOUND (int): Video to be registered wasn't found.
VIDEO_NOT_ACCESSIBLE (int): Video to be registered is not accessible (e.g. private).
VIDEO_NOT_ELIGIBLE (int): Video to be registered is not eligible (e.g. mature content).
"""
UNSPECIFIED = 0
UNKNOWN = 1
VIDEO_NOT_FOUND = 2
VIDEO_NOT_ACCESSIBLE = 3
VIDEO_NOT_ELIGIBLE = 4
'''
YoutubeVideoRegistrationErrorEnum = YoutubeVideoRegistrationErrorEnum() # For __getattribute__
| 46.501767 | 166 | 0.687753 |
a1297148dba1466ea88f7c774f54616096def0ec | 90 | py | Python | DataAnalysis/ProcessingTools/Queues/__init__.py | AdamSwenson/TwitterProject | 8c5dc7a57eac611b555058736d609f2f204cb836 | [
"MIT"
] | null | null | null | DataAnalysis/ProcessingTools/Queues/__init__.py | AdamSwenson/TwitterProject | 8c5dc7a57eac611b555058736d609f2f204cb836 | [
"MIT"
] | 6 | 2020-03-24T17:34:24.000Z | 2021-12-13T20:14:34.000Z | DataAnalysis/ProcessingTools/Queues/__init__.py | AdamSwenson/TwitterProject | 8c5dc7a57eac611b555058736d609f2f204cb836 | [
"MIT"
] | null | null | null | """
Created by adam on 5/4/18
"""
__author__ = 'adam'
if __name__ == '__main__':
pass | 12.857143 | 26 | 0.611111 |
11e0852edce6d92dee393e4fb99e5f1aeb16e80a | 476 | py | Python | packages/python/plotly/plotly/validators/layout/updatemenu/_borderwidth.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/layout/updatemenu/_borderwidth.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/layout/updatemenu/_borderwidth.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the `borderwidth` property of `layout.updatemenu`.

    Thin wrapper around the generic ``NumberValidator`` that bakes in the
    property name, its parent path, and the schema defaults for this field.
    """

    def __init__(
        self, plotly_name="borderwidth", parent_name="layout.updatemenu", **kwargs
    ):
        # Pull the overridable options out of kwargs, falling back to the
        # schema defaults for this property; whatever remains is forwarded
        # untouched to the base validator.
        edit_type = kwargs.pop("edit_type", "arraydraw")
        minimum = kwargs.pop("min", 0)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
| 31.733333 | 82 | 0.64916 |
ad5f43ff5c201e5110a5d48df8de10b5ba4570a3 | 9,924 | py | Python | bui/layout/radio.py | vincent-lg/bui | a4a783fea2269190fb69c6491a4bc91753ccc580 | [
"BSD-3-Clause"
] | 3 | 2019-07-28T17:15:56.000Z | 2020-04-12T14:05:12.000Z | bui/layout/radio.py | vincent-lg/bui | a4a783fea2269190fb69c6491a4bc91753ccc580 | [
"BSD-3-Clause"
] | null | null | null | bui/layout/radio.py | vincent-lg/bui | a4a783fea2269190fb69c6491a4bc91753ccc580 | [
"BSD-3-Clause"
] | null | null | null | """Radio button object represented in layout."""
from bui.layout.attr import Attr
from bui.layout.component import Component
class RadioButton(Component):
"""
Create a radio button with multiple choices.
A radio button is represented by multiple, mutually-exclusive buttons.
Each button in the group can't be selected without the other ones
in the same group being de-selected.
```
<window title="Table demonstration">
<radio id=choices x=2 y=4>
<choice>Choice 1</choice>
<choice>Choice 2</choice>
<choice>Choice 3</choice>
</radio>
</window>
```
Using the `<radio>` landmark creates an empty radio button group.
A radio button must have at least two choices (if less, consider using
a [checkbox](checkbox.md)). You can use the `<choice>` tag inside
of the `<radio>` to create a selectable choice in the group, or use the
method `add_choice` of a [RadioButton](../../widget/radio_button.py)
widget to add the button programmatically.
## Attributes
| Name | Required | Description | Example |
| ------------ | -------- | ------------------------ | ----------- |
| `x` | Yes | The widget's horizontal | `<radio |
| | | position in columns (0 | x=5>` |
| | | is left). This position | |
| | | is relative to the | |
| | | window width. | |
| `y` | Yes | The widget's vertical | `<radio |
| | | position in rows (0 | y=2>` |
| | | is at the top). This | |
| | | position is relative to | |
| | | the window height. | |
| `id` | Yes | The radio button group | `<radio |
| | | identifier (ID). Contrary| id=actions>`|
| | | to most other tags, you | |
| | | need to set this | |
| | | attribute. | |
| `width` | No | The widget width, that | `<radio |
| | | is, the number of | width=2>` |
| | | columns it will use in | |
| | | the window grid. A | |
| | | widget with a width of | |
| | | 2 will stretch one | |
| | | additional column to the | |
| | | right. A widget with `x` | |
| | | set to 2 and `width` set | |
| | | to 3 will span `x=2`, | |
| | | `x=3`, and `x=4`. The | |
| | | default is 1, so a | |
| | | widget will remain in | |
| | | its `x` column. | |
| `height` | No | The widget height, that | `<radio |
| | | is, the number of | height=2>` |
| | | rows it will use in | |
| | | the window grid. A | |
| | | widget with a height of | |
| | | 2 will stretch one | |
| | | additional row downward. | |
| | | A widget with `y` set | |
| | | to 2 and `height` set | |
| | | to 3 will span `y=2`, | |
| | | `y=3`, and `y=4`. The | |
| | | default is 1, so a | |
| | | widget will remain in | |
| | | its `y` row. | |
See also the [choice](./choice.md) tag to define the choices
in a radio button group.
    > Note: the radio button identifier is mandatory. You cannot create a
radio button without a valid identifier. The reason for this constraint
is due to the fact that this widget, in particular, will be edited
in your application. Lacking an identifier, you won't be able to do
that.
## Data
The [radio button widget](../../widget/radio_button.md) can be manipulated
to add and remove choices, and know the selected choice.
| Attribute | Meaning and type | Example |
| -------------- | ---------------- | --------------------------- |
| `choices` | The available | `self.choices = (("ch1", |
| | choices (lsit) | "Choice 1"))` |
| `selected` | ID of the | `self.selected = "ch1"` |
| | selected choice | |
| | (str) or indice | |
| | of the choice if | |
| | no ID is set | |
| | (int). | |
You can easily update the entire radio button this way. Just set the
`choices` property on the
[radio button widget](../../widget/RadioButton.md) instance, specifying
a list of choices where each choice is a tuple of two values: ID
(as a `str`) and label to be displayed (as a `str`). Th ID will be
returned when this choice is selected:
def on_click_update(self):
# A button of ID 'update' was clicked
# There is a radio button of ID 'actions' in the window
actions = self["actions"]
actions.choices = (
("update", "Update the software"),
("repair", "Repair the software"),
("reinstall", "Remove the software configuration and install it again"),
("remove", "Uninstall this software"),
)
action = actions.selected
# action will be either "update", "repair", "reinstall" or "remove"
# You can of course change the selected choice
actions.selected = "reinstall"
## Controls
| Control | Method | Description |
| ------------------------------- | ------------ | ---------------- |
| [focus](../../control/focus.md) | `on_focus` | The radio |
| | | button is |
| | | focused or |
| | | loses focus. |
| [init](../../control/init.md) | `on_init` | The radio is |
| | | ready to be |
| | | displayed, but |
| | | is not displayed |
| | | just yet. |
| [press](../../control/press.md) | `on_press` | The user presses |
| | | on a key from her|
| | | keyboard. This |
| | | control can have |
| | | sub-controls. |
| [release](../../ | `on_release` | The user |
| control/release.md) | | relases a key on |
| | | her keyboard. |
| | | This control can |
| | | have sub- |
| | | controls. |
| [select](../../ | `on_select` | The selected |
| control/select.md) | | choice has |
| | | changed for |
| | | this radio |
| | | button. |
| [type](../../control/type.md) | `on_type` | The user types |
| | | a character |
| | | using her |
| | | keyboard. This |
| | | control can have |
| | | sub-controls. |
class MainWindow(Window):
def on_select_actions(self, widget, selected):
print(f"The user selected the action {selected}.")
"""
tag_name = "radio"
attrs = (
Attr("x", help="The widget horizontal position", type=int),
Attr("y", help="The widget vertical position", type=int),
Attr("id", help="The widget identifier"),
Attr("width", help="The widget width", type=int, default=1),
Attr("height", help="The widget height", type=int, default=1),
)
def __init__(self, layout, parent, x, y, id, width=1, height=1):
super().__init__(layout, parent)
self.x = x
self.y = y
self.id = id
self.width = width
self.height = height
| 52.787234 | 88 | 0.366385 |
9324713e71eeec4389cfc11ae3d9fbbc0e32cb7a | 2,694 | py | Python | setup.py | MelkyFB-zz/home-assistant | 81453e9508d97270674415f0b197c69a60355e51 | [
"Apache-2.0"
] | 1 | 2018-10-24T02:43:48.000Z | 2018-10-24T02:43:48.000Z | setup.py | hassbeat/home-assistant | a08e5efe53b3d99fba3c6d46f37e1a8572a39278 | [
"Apache-2.0"
] | null | null | null | setup.py | hassbeat/home-assistant | a08e5efe53b3d99fba3c6d46f37e1a8572a39278 | [
"Apache-2.0"
] | 1 | 2019-02-07T11:50:04.000Z | 2019-02-07T11:50:04.000Z | #!/usr/bin/env python3
"""Home Assistant setup script."""
from setuptools import setup, find_packages
import homeassistant.const as hass_const
PROJECT_NAME = 'Home Assistant'
PROJECT_PACKAGE_NAME = 'homeassistant'
PROJECT_LICENSE = 'Apache License 2.0'
PROJECT_AUTHOR = 'The Home Assistant Authors'
PROJECT_COPYRIGHT = ' 2013-2018, {}'.format(PROJECT_AUTHOR)
PROJECT_URL = 'https://home-assistant.io/'
PROJECT_EMAIL = 'hello@home-assistant.io'
PROJECT_DESCRIPTION = ('Open-source home automation platform '
'running on Python 3.')
PROJECT_LONG_DESCRIPTION = ('Home Assistant is an open-source '
'home automation platform running on Python 3. '
'Track and control all devices at home and '
'automate control. '
'Installation in less than a minute.')
PROJECT_CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Home Automation'
]
PROJECT_GITHUB_USERNAME = 'home-assistant'
PROJECT_GITHUB_REPOSITORY = 'home-assistant'
PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_PACKAGE_NAME)
GITHUB_PATH = '{}/{}'.format(
PROJECT_GITHUB_USERNAME, PROJECT_GITHUB_REPOSITORY)
GITHUB_URL = 'https://github.com/{}'.format(GITHUB_PATH)
DOWNLOAD_URL = '{}/archive/{}.zip'.format(GITHUB_URL, hass_const.__version__)
PACKAGES = find_packages(exclude=['tests', 'tests.*'])
REQUIRES = [
'requests==2.18.4',
'pyyaml>=3.11,<4',
'pytz>=2017.02',
'pip>=8.0.3',
'jinja2>=2.10',
'voluptuous==0.11.1',
'typing>=3,<4',
'aiohttp==3.1.0',
'async_timeout==2.0.1',
'astral==1.6',
'certifi>=2017.4.17',
'attrs==17.4.0',
]
MIN_PY_VERSION = '.'.join(map(str, hass_const.REQUIRED_PYTHON_VER))
setup(
name=PROJECT_PACKAGE_NAME,
version=hass_const.__version__,
license=PROJECT_LICENSE,
url=PROJECT_URL,
download_url=DOWNLOAD_URL,
author=PROJECT_AUTHOR,
author_email=PROJECT_EMAIL,
description=PROJECT_DESCRIPTION,
packages=PACKAGES,
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=REQUIRES,
python_requires='>={}'.format(MIN_PY_VERSION),
test_suite='tests',
keywords=['home', 'automation'],
entry_points={
'console_scripts': [
'hass = homeassistant.__main__:main'
]
},
classifiers=PROJECT_CLASSIFIERS,
)
| 31.694118 | 77 | 0.660356 |
8b8e343ce5e13ffd2c48aaed6fca2f02fa249892 | 6,835 | py | Python | render/BitcoinTradingGraph.py | botemple/Bitcoin-Trader-RL | 0da28ccf4fed4bdfcce7c77de2c1c8d04cad7477 | [
"MIT"
] | 10 | 2019-09-08T12:44:51.000Z | 2021-07-19T09:05:21.000Z | render/BitcoinTradingGraph.py | botemple/Bitcoin-Trader-RL | 0da28ccf4fed4bdfcce7c77de2c1c8d04cad7477 | [
"MIT"
] | null | null | null | render/BitcoinTradingGraph.py | botemple/Bitcoin-Trader-RL | 0da28ccf4fed4bdfcce7c77de2c1c8d04cad7477 | [
"MIT"
] | 2 | 2021-03-25T08:43:16.000Z | 2021-04-20T12:53:40.000Z |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
from datetime import datetime
# finance module is no longer part of matplotlib
# see: https://github.com/matplotlib/mpl_finance
from mpl_finance import candlestick_ochl as candlestick
style.use('ggplot')
VOLUME_CHART_HEIGHT = 0.33
class BitcoinTradingGraph:
"""A Bitcoin trading visualization using matplotlib made to render OpenAI gym environments"""
def __init__(self, df, title=None):
self.df = df
self.net_worths = np.zeros(len(df))
# Create a figure on screen and set the title
fig = plt.figure()
fig.suptitle(title)
# Create top subplot for net worth axis
self.net_worth_ax = plt.subplot2grid(
(6, 1), (0, 0), rowspan=2, colspan=1)
# Create bottom subplot for shared price/volume axis
self.price_ax = plt.subplot2grid(
(6, 1), (2, 0), rowspan=8, colspan=1, sharex=self.net_worth_ax)
# Create a new axis for volume which shares its x-axis with price
self.volume_ax = self.price_ax.twinx()
# Add padding to make graph easier to view
plt.subplots_adjust(left=0.11, bottom=0.24,
right=0.90, top=0.90, wspace=0.2, hspace=0)
# Show the graph without blocking the rest of the program
plt.show(block=False)
def _render_net_worth(self, current_step, net_worth, step_range, dates):
# Clear the frame rendered last step
self.net_worth_ax.clear()
# Plot net worths
self.net_worth_ax.plot_date(
dates, self.net_worths[step_range], '-', label='Net Worth')
# Show legend, which uses the label we defined for the plot above
self.net_worth_ax.legend()
legend = self.net_worth_ax.legend(loc=2, ncol=2, prop={'size': 8})
legend.get_frame().set_alpha(0.4)
last_date = self.df['Timestamp'].values[current_step]
last_net_worth = self.net_worths[current_step]
# Annotate the current net worth on the net worth graph
self.net_worth_ax.annotate('{0:.2f}'.format(net_worth), (last_date, last_net_worth),
xytext=(last_date, last_net_worth),
bbox=dict(boxstyle='round',
fc='w', ec='k', lw=1),
color="black",
fontsize="small")
# Add space above and below min/max net worth
self.net_worth_ax.set_ylim(
min(self.net_worths[np.nonzero(self.net_worths)]) / 1.25, max(self.net_worths) * 1.25)
def _render_price(self, current_step, net_worth, step_range, dates):
self.price_ax.clear()
# Format data for OHCL candlestick graph
candlesticks = zip(dates,
self.df['Open'].values[step_range], self.df['Close'].values[step_range],
self.df['High'].values[step_range], self.df['Low'].values[step_range])
# Plot price using candlestick graph from mpl_finance
candlestick(self.price_ax, candlesticks, width=2)
last_date = self.df['Timestamp'].values[current_step]
last_close = self.df['Close'].values[current_step]
last_high = self.df['High'].values[current_step]
# Print the current price to the price axis
self.price_ax.annotate('{0:.2f}'.format(last_close), (last_date, last_close),
xytext=(last_date, last_high),
bbox=dict(boxstyle='round',
fc='w', ec='k', lw=1),
color="black",
fontsize="small")
# Shift price axis up to give volume chart space
ylim = self.price_ax.get_ylim()
self.price_ax.set_ylim(ylim[0] - (ylim[1] - ylim[0])
* VOLUME_CHART_HEIGHT, ylim[1])
def _render_volume(self, current_step, net_worth, step_range, dates):
self.volume_ax.clear()
volume = np.array(self.df['Volume_(BTC)'].values[step_range])
pos = self.df['Open'].values[step_range] - \
self.df['Close'].values[step_range] < 0
neg = self.df['Open'].values[step_range] - \
self.df['Close'].values[step_range] > 0
# Color volume bars based on price direction on that date
self.volume_ax.bar(dates[pos], volume[pos], width=2, color='g')
self.volume_ax.bar(dates[neg], volume[neg], width=2, color='r')
self.volume_ax.yaxis.set_ticks([])
def _render_trades(self, current_step, trades, step_range):
for trade in trades:
if trade['step'] in step_range:
date = self.df['Timestamp'].values[trade['step']]
close = self.df['Close'].values[trade['step']]
high = self.df['High'].values[trade['step']]
low = self.df['Low'].values[trade['step']]
if trade['type'] == 'buy':
high_low = low
color = 'g'
else:
high_low = high
color = 'r'
total = '{0:.2f}'.format(trade['total'])
self.price_ax.annotate('$' + str(total), (date, close),
xytext=(date, high_low),
bbox=dict(boxstyle='round',
fc='w', ec='k', lw=1, alpha=0.4),
color=color,
alpha=0.4,
fontsize="small")
    def render(self, current_step, net_worth, trades, window_size=40):
        """Render one frame: net worth, price, volume and trade markers.

        Shows the last ``window_size`` steps up to and including
        ``current_step``. ``trades`` is the full trade history; only trades
        inside the window are annotated.
        """
        # Record the latest net worth so the net-worth subplot stays current.
        self.net_worths[current_step] = net_worth
        # Visible window: [current_step - window_size, current_step], clamped at 0.
        window_start = max(current_step - window_size, 0)
        step_range = range(window_start, current_step + 1)
        dates = self.df['Timestamp'].values[step_range]
        self._render_net_worth(current_step, net_worth, step_range, dates)
        self._render_price(current_step, net_worth, step_range, dates)
        self._render_volume(current_step, net_worth, step_range, dates)
        self._render_trades(current_step, trades, step_range)
        # Format the epoch timestamps as human-readable x tick labels.
        # NOTE(review): utcfromtimestamp implies the Timestamp column is in
        # UTC seconds — confirm against the data source.
        date_labels = np.array([datetime.utcfromtimestamp(x).strftime(
            '%Y-%m-%d %H:%M') for x in self.df['Timestamp'].values[step_range]])
        self.price_ax.set_xticklabels(
            date_labels, rotation=45, horizontalalignment='right')
        # Hide duplicate net worth date labels
        plt.setp(self.net_worth_ax.get_xticklabels(), visible=False)
        # Necessary to view frames before they are unrendered
        plt.pause(0.001)
    def close(self):
        """Close the matplotlib figure/window backing this visualization."""
        plt.close()
| 40.205882 | 99 | 0.573958 |
84ede800b0b9cca1d7bdc1d6a25b37fee3de973b | 2,097 | py | Python | var/spack/repos/builtin/packages/biobambam2/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/biobambam2/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/biobambam2/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Biobambam2(AutotoolsPackage):
    """Tools for early stage alignment file processing"""
    homepage = "https://gitlab.com/german.tischler/biobambam2"
    url = "https://gitlab.com/german.tischler/biobambam2/-/archive/2.0.177-release-20201112105453/biobambam2-2.0.177-release-20201112105453.tar.gz"
    version('2.0.177', sha256='ad0a418fb49a31996a105a1a275c0d1dfc8b84aa91d48fa1efb6ff4fe1e74181',
            url='https://gitlab.com/german.tischler/biobambam2/-/archive/2.0.177-release-20201112105453/biobambam2-2.0.177-release-20201112105453.tar.gz')
    # Autotools toolchain is needed only at build time.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    # Runtime/link dependency; its prefix is passed to configure below.
    depends_on('libmaus2')
    # Source-tree subdirectory holding the shipped smoke-test scripts.
    test_src_dir = 'test'
    def configure_args(self):
        """Point ./configure at the installed libmaus2 prefix."""
        args = ['--with-libmaus2={0}'.format(self.spec['libmaus2'].prefix)]
        return args
    def _fix_shortsort(self):
        """Fix the testshortsort.sh file copied during installation."""
        # The script invokes tools via '../src/'; after installation the
        # binaries are found on PATH, so strip that prefix in the cached copy.
        test_dir = join_path(self.install_test_root, self.test_src_dir)
        filter_file('../src/', '', join_path(test_dir, 'testshortsort.sh'))
    @run_after('install')
    def cache_test_sources(self):
        """Copy the test source files after the package is installed to an
        install test subdirectory for use during `spack test run`."""
        self.cache_extra_test_sources(self.test_src_dir)
        self._fix_shortsort()
    def test(self):
        """Perform stand-alone/smoke test on installed package."""
        test_dir = join_path(self.test_suite.current_test_cache_dir,
                             self.test_src_dir)
        # Runs the shipped shell test and greps its output for the
        # expected success message.
        self.run_test('sh', ['testshortsort.sh'],
                      expected='Alignments sorted by coordinate.',
                      purpose='test: checking alignments',
                      work_dir=test_dir)
| 41.94 | 154 | 0.67525 |
5734e017dc0541fc25d120148167be91c2166147 | 548 | py | Python | utils/print_rejected_outlinks.py | truxonjm/BrozzlerAdmin | 511fe3f07282cb9499e97877e62d7418cb9c1def | [
"Apache-2.0"
] | 2 | 2019-12-23T23:33:46.000Z | 2020-11-14T00:52:15.000Z | utils/print_rejected_outlinks.py | truxonjm/BrozzlerAdmin | 511fe3f07282cb9499e97877e62d7418cb9c1def | [
"Apache-2.0"
] | 1 | 2017-10-20T13:28:50.000Z | 2017-10-20T17:13:22.000Z | utils/print_rejected_outlinks.py | truxonjm/BrozzlerAdmin | 511fe3f07282cb9499e97877e62d7418cb9c1def | [
"Apache-2.0"
] | 1 | 2021-06-12T11:40:02.000Z | 2021-06-12T11:40:02.000Z | import doublethink
from argparse import ArgumentParser

# Command-line interface: positional job id plus an optional server address.
cli = ArgumentParser(description="Utility to print all outlinks from a job.")
cli.add_argument('job_id',help="job_id we want to print the rejected outlinks")
cli.add_argument('--rethinkdb_server', default="localhost:28015", help="RethinkDB server")
options = cli.parse_args()

# Each 'pages' row of the job carries a list of rejected outlinks; print
# every rejected outlink of every page.
rethinker = doublethink.Rethinker([options.rethinkdb_server], 'brozzler')
query = rethinker.table('pages').filter({'job_id': options.job_id })['outlinks']['rejected']
for rejected_list in query.run():
    for outlink in rejected_list:
        print(outlink)
| 36.533333 | 93 | 0.75 |
5003b6fb507bbca926f169be33e5ede4a2a36812 | 527 | py | Python | index/models.py | GadirMirzayev/Django-E-commerce | 0ca289fdf584b29636a8fc9416319defad0be5a5 | [
"MIT"
] | 1 | 2021-08-20T07:44:39.000Z | 2021-08-20T07:44:39.000Z | index/models.py | GadirMirzayev/Django-E-commerce | 0ca289fdf584b29636a8fc9416319defad0be5a5 | [
"MIT"
] | null | null | null | index/models.py | GadirMirzayev/Django-E-commerce | 0ca289fdf584b29636a8fc9416319defad0be5a5 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Subscriber(models.Model):
    """A newsletter subscriber identified by an e-mail address."""
    # Subscriber information
    email = models.EmailField('E-mail', max_length=63)
    # Moderation / bookkeeping fields
    is_active = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
    class Meta:
        verbose_name = 'Subscriber'
        verbose_name_plural = 'Subscribers'
        # Newest subscribers first by default.
        ordering = ('-created_at',)
def __str__(self):
return self.email | 26.35 | 56 | 0.688805 |
e6cd7b5df9e506ba7a3e77fc3e6763aa18802799 | 53 | py | Python | SadapWA.py | McybearX/GameTTs | d62b675394833038cb7df3992af0b3de08ec4fa6 | [
"Apache-2.0"
] | null | null | null | SadapWA.py | McybearX/GameTTs | d62b675394833038cb7df3992af0b3de08ec4fa6 | [
"Apache-2.0"
] | null | null | null | SadapWA.py | McybearX/GameTTs | d62b675394833038cb7df3992af0b3de08ec4fa6 | [
"Apache-2.0"
] | null | null | null | os.system("xdg-open https://youtube.com/c/MBEWLEGS")
| 26.5 | 52 | 0.735849 |
96f1b94a0f4a437fb506f1c3321640141bbdaaa1 | 6,434 | py | Python | openstack_dashboard/dashboards/project/access_and_security/floating_ips/tables.py | aristanetworks/horizon | 6b4ba5194d46360bf1a436b6f9531facfbf5084a | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/project/access_and_security/floating_ips/tables.py | aristanetworks/horizon | 6b4ba5194d46360bf1a436b6f9531facfbf5084a | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/project/access_and_security/floating_ips/tables.py | aristanetworks/horizon | 6b4ba5194d46360bf1a436b6f9531facfbf5084a | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core import urlresolvers
from django import shortcuts
from django.utils.http import urlencode
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
LOG = logging.getLogger(__name__)
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION", lambda p, r: True)
class AllocateIP(tables.LinkAction):
    """Table action that opens the 'allocate floating IP' modal."""
    name = "allocate"
    verbose_name = _("Allocate IP To Project")
    classes = ("ajax-modal",)
    icon = "download-alt"
    url = "horizon:project:access_and_security:floating_ips:allocate"
    def single(self, data_table, request, *args):
        # After a single-row invocation, go back to the index page.
        return shortcuts.redirect('horizon:project:access_and_security:index')
    def allowed(self, request, volume=None):
        """Enable/disable the button based on quota and policy.

        Side effect: mutates ``self.classes`` and ``self.verbose_name`` so
        the rendered button reflects the quota state.
        """
        usages = quotas.tenant_quota_usages(request)
        if usages['floating_ips']['available'] <= 0:
            # Quota exhausted: grey the button out and append a hint.
            if "disabled" not in self.classes:
                self.classes = [c for c in self.classes] + ['disabled']
            self.verbose_name = string_concat(self.verbose_name, ' ',
                                              _("(Quota exceeded)"))
        else:
            # Quota available again: restore the label and drop 'disabled'.
            self.verbose_name = _("Allocate IP To Project")
            classes = [c for c in self.classes if c != "disabled"]
            self.classes = classes
        # Neutron and nova-network are governed by different policy rules.
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "create_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:allocate_floating_ip"),)
        return POLICY_CHECK(policy, request)
class ReleaseIPs(tables.BatchAction):
    """Batch action that releases floating IPs back to their pool."""
    name = "release"
    action_present = _("Release")
    action_past = _("Released")
    data_type_singular = _("Floating IP")
    data_type_plural = _("Floating IPs")
    classes = ('btn-danger',)
    icon = "arrow-up"

    def allowed(self, request, fip=None):
        """Permit the action when the deployment's policy rules allow it."""
        neutron_enabled = api.base.is_service_enabled(request, "network")
        # Neutron and nova-network use different policy rule names.
        rules = ((("network", "delete_floatingip"),)
                 if neutron_enabled else
                 (("compute", "compute_extension:floating_ips"),
                  ("compute", "network:release_floating_ip"),))
        return POLICY_CHECK(rules, request)

    def action(self, request, obj_id):
        """Release the floating IP identified by ``obj_id``."""
        api.network.tenant_floating_ip_release(request, obj_id)
class AssociateIP(tables.LinkAction):
    """Row action opening the modal that attaches an IP to an instance."""
    name = "associate"
    verbose_name = _("Associate")
    url = "horizon:project:access_and_security:floating_ips:associate"
    classes = ("ajax-modal",)
    icon = "link"

    def allowed(self, request, fip):
        """Only unattached IPs may be associated, subject to policy."""
        if api.base.is_service_enabled(request, "network"):
            rules = (("network", "update_floatingip"),)
        else:
            rules = (("compute", "compute_extension:floating_ips"),
                     ("compute", "network:associate_floating_ip"),)
        if fip.port_id:
            # Already attached to a port — cannot associate again.
            return False
        return POLICY_CHECK(rules, request)

    def get_link_url(self, datum):
        """Build the associate URL carrying the row's IP id as a query arg."""
        query = urlencode({"ip_id": self.table.get_object_id(datum)})
        return urlresolvers.reverse(self.url) + "?" + query
class DisassociateIP(tables.Action):
    """Row action that detaches a floating IP from its instance port."""
    name = "disassociate"
    verbose_name = _("Disassociate")
    classes = ("btn-disassociate", "btn-danger")
    def allowed(self, request, fip):
        # Only meaningful for IPs that are currently attached (truthy port_id).
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "update_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:disassociate_floating_ip"),)
        return fip.port_id and POLICY_CHECK(policy, request)
    def single(self, table, request, obj_id):
        """Disassociate one IP and redirect back to the index page.

        Errors are reported to the user via ``exceptions.handle`` rather
        than propagated; the redirect happens in either case.
        """
        try:
            fip = table.get_object_by_id(filters.get_int_or_uuid(obj_id))
            api.network.floating_ip_disassociate(request, fip.id,
                                                 fip.port_id)
            LOG.info('Disassociating Floating IP "%s".' % obj_id)
            messages.success(request,
                             _('Successfully disassociated Floating IP: %s')
                             % fip.ip)
        except Exception:
            # Broad catch is intentional: surface any failure as a UI message.
            exceptions.handle(request,
                              _('Unable to disassociate floating IP.'))
        return shortcuts.redirect('horizon:project:access_and_security:index')
def get_instance_info(instance):
    """Instance name for the table cell; None when the IP is unattached."""
    try:
        return instance.instance_name
    except AttributeError:
        return None
def get_instance_link(datum):
    """URL of the instance detail page, or None when nothing is attached."""
    if not datum.instance_id:
        return None
    return urlresolvers.reverse("horizon:project:instances:detail",
                                args=(datum.instance_id,))
class FloatingIPsTable(tables.DataTable):
    """Table listing the project's floating IPs and their attachments."""
    ip = tables.Column("ip",
                       verbose_name=_("IP Address"),
                       attrs={'data-type': "ip"})
    instance = tables.Column(get_instance_info,
                             link=get_instance_link,
                             verbose_name=_("Instance"),
                             empty_value="-")
    pool = tables.Column("pool_name",
                         verbose_name=_("Floating IP Pool"),
                         empty_value="-")
    def sanitize_id(self, obj_id):
        # Row ids may be plain ints (nova-network) or UUIDs (neutron).
        return filters.get_int_or_uuid(obj_id)
    def get_object_display(self, datum):
        # Human-readable identifier used in confirmations and messages.
        return datum.ip
    class Meta:
        name = "floating_ips"
        verbose_name = _("Floating IPs")
        table_actions = (AllocateIP, ReleaseIPs)
        row_actions = (AssociateIP, DisassociateIP, ReleaseIPs)
| 36.146067 | 78 | 0.632577 |
220aad9bd6862c3b0e8bcaa6d4f340334793916b | 17,723 | py | Python | DPGAnalysis/SiStripTools/test/manyfederrors_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DPGAnalysis/SiStripTools/test/manyfederrors_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DPGAnalysis/SiStripTools/test/manyfederrors_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
# cmsRun process for studying events with many SiStrip FED errors.
process = cms.Process("ManyFEDErrors")
#prepare options
options = VarParsing.VarParsing("analysis")
# Global tag must be supplied on the command line; the default is a
# deliberately invalid placeholder so a missing value fails loudly.
options.register ('globalTag',
                  "DONOTEXIST",
                  VarParsing.VarParsing.multiplicity.singleton, # singleton or list
                  VarParsing.VarParsing.varType.string, # string, int, or float
                  "GlobalTag")
options.parseArguments()
#
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True),
    fileMode = cms.untracked.string("FULLMERGE")
    )
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cout.enable = cms.untracked.bool(True)
process.MessageLogger.cout.threshold = cms.untracked.string("DEBUG")
process.MessageLogger.cout.default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cout.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000)
)
process.MessageLogger.cerr.enable = cms.untracked.bool(True)
process.MessageLogger.cerr.threshold = cms.untracked.string("WARNING")
process.MessageLogger.cerr.default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(100000)
)
process.MessageLogger.debugModules=cms.untracked.vstring("eventtimedistribution")
#------------------------------------------------------------------
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
# skipBadFiles = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
)
#--------------------------------------
# Standard reconstruction configuration plus SiStripTools event-history
# producers/analyzers.
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
# Build the event-history product from the L1ABC collection.
process.froml1abcHEs = cms.EDProducer("EventWithHistoryProducerFromL1ABC",
                                      l1ABCCollection=cms.InputTag("scalersRawToDigi")
                                      )
process.load("DPGAnalysis.SiStripTools.apvcyclephaseproducerfroml1tsDB_cfi")
process.load("DPGAnalysis.SiStripTools.l1TSDebugger_cfi")
process.load("DPGAnalysis.SiStripTools.eventtimedistribution_cfi")
process.eventtimedistribution.historyProduct = cms.InputTag("froml1abcHEs")
# Histogram the bunch-crossing distance (DBX) between many pairs of
# previous events (firstEvent/secondEvent indices into the history).
process.eventtimedistribution.dbxHistosParams = cms.untracked.VPSet(
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(0),secondEvent=cms.uint32(7)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(1),secondEvent=cms.uint32(7)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(2),secondEvent=cms.uint32(7)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(3),secondEvent=cms.uint32(7)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(4),secondEvent=cms.uint32(7)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(5),secondEvent=cms.uint32(7)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(6),secondEvent=cms.uint32(7)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(0),secondEvent=cms.uint32(6)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(1),secondEvent=cms.uint32(6)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(2),secondEvent=cms.uint32(6)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(3),secondEvent=cms.uint32(6)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(4),secondEvent=cms.uint32(6)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(5),secondEvent=cms.uint32(6)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(0),secondEvent=cms.uint32(5)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(1),secondEvent=cms.uint32(5)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(2),secondEvent=cms.uint32(5)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(3),secondEvent=cms.uint32(5)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(4),secondEvent=cms.uint32(5)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(0),secondEvent=cms.uint32(4)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(1),secondEvent=cms.uint32(4)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(2),secondEvent=cms.uint32(4)),
    cms.PSet(nbins=cms.int32(2000),min=cms.double(-0.5),max=cms.double(3999.5),firstEvent=cms.uint32(3),secondEvent=cms.uint32(4)),
    cms.PSet(nbins=cms.int32(1000),min=cms.double(-0.5),max=cms.double(999.5),firstEvent=cms.uint32(0),secondEvent=cms.uint32(1)),
    )
# One clone of the time-distribution analyzer per FED-error threshold class,
# so each filtered path below books its own set of histograms.
process.eventtimedistrmanyfederrorsallthr = process.eventtimedistribution.clone()
process.eventtimedistrmanyfederrorshighthr = process.eventtimedistribution.clone()
process.eventtimedistrmanyfederrorsmidthr = process.eventtimedistribution.clone()
process.eventtimedistrmanyfederrorslowthr = process.eventtimedistribution.clone()
process.eventtimedistrnomanyfederrors = process.eventtimedistribution.clone()
process.seqEventHistoryReco = cms.Sequence(process.froml1abcHEs + process.APVPhases + process.l1TSDebugger)
# Monitor of the APV cycle phase per partition.
process.apvcyclephasemonitor = cms.EDAnalyzer('APVCyclePhaseMonitor',
                                              apvCyclePhaseCollection = cms.InputTag("APVPhases"),
                                              maxLSBeforeRebin = cms.untracked.uint32(250),
                                              startingLSFraction = cms.untracked.uint32(16),
                                              selectedPartitions = cms.untracked.vstring("TI","TO","TP","TM"),
                                              selectedVectorPartitions = cms.untracked.vstring("Any")
                                              )
process.seqEventHistory = cms.Sequence(process.eventtimedistribution + process.apvcyclephasemonitor)
process.seqEventHistoryManyFEDErrorsAllThr = cms.Sequence(process.eventtimedistrmanyfederrorsallthr)
process.seqEventHistoryManyFEDErrorsHighThr = cms.Sequence(process.eventtimedistrmanyfederrorshighthr)
process.seqEventHistoryManyFEDErrorsMidThr = cms.Sequence(process.eventtimedistrmanyfederrorsmidthr)
process.seqEventHistoryManyFEDErrorsLowThr = cms.Sequence(process.eventtimedistrmanyfederrorslowthr)
process.seqEventHistoryNoManyFEDErrors = cms.Sequence(process.eventtimedistrnomanyfederrors)
process.seqProducers = cms.Sequence(process.seqEventHistoryReco)
#do not ignore APVe mismatches
process.siStripDigis.DoAPVEmulatorCheck = cms.bool(True)
# Alternative (disabled) approach: raise the unpacker error thresholds and
# select on its "TooManyErrors" log messages instead of on bad-module counts.
#process.siStripDigis.ErrorThreshold = cms.uint32(30000)
#process.siStripDigisMidThr = process.siStripDigis.clone(ErrorThreshold = cms.uint32(7174))
#process.siStripDigisLowThr = process.siStripDigis.clone(ErrorThreshold = cms.uint32(2800))
#process.siStripDigisAllThr = process.siStripDigis.clone(ErrorThreshold = cms.uint32(35500))
#process.seqSiStripDigis = cms.Sequence(process.siStripDigis + process.siStripDigisLowThr + process.siStripDigisMidThr + process.siStripDigisAllThr)
process.seqSiStripDigis = cms.Sequence(process.siStripDigis)
process.seqRECO = cms.Sequence(process.seqSiStripDigis +
                               process.scalersRawToDigi +
                               process.logErrorHarvester
                               )
#process.logErrorTooManyErrors = cms.EDFilter("LogErrorEventFilter",
#                                             src = cms.InputTag("logErrorHarvester"),
#                                             maxErrorFractionInLumi = cms.double(1.0),
#                                             maxErrorFractionInRun = cms.double(1.0),
#                                             maxSavedEventsPerLumiAndError = cms.uint32(1000000),
#                                             # categoriesToIgnore = cms.vstring("HLTConfigProvider","FastCloningDisabled")
#                                             categoriesToWatch = cms.vstring("TooManyErrors"),
#                                             modulesToWatch = cms.vstring("SiStripRawToDigiModule:siStripDigis")
#
#                                             )
#process.logErrorTooManyErrorsLowThr = process.logErrorTooManyErrors.clone(modulesToWatch = cms.vstring("SiStripRawToDigiModule:siStripDigisLowThr"))
#process.logErrorTooManyErrorsMidThr = process.logErrorTooManyErrors.clone(modulesToWatch = cms.vstring("SiStripRawToDigiModule:siStripDigisMidThr"))
#process.logErrorTooManyErrorsAllThr = process.logErrorTooManyErrors.clone(modulesToWatch = cms.vstring("SiStripRawToDigiModule:siStripDigisAllThr"))
# Event filters selecting events with more bad FED modules than a threshold;
# cloned below with different badModThr values to define the error classes.
process.filterFEDBadModuleHighThr = cms.EDFilter("FEDBadModuleFilter",
                                                 collectionName=cms.InputTag("siStripDigis"),
                                                 badModThr = cms.uint32(30000),
                                                 wantedHisto=cms.untracked.bool(False),
                                                 maxLSBeforeRebin = cms.untracked.uint32(250),
                                                 startingLSFraction = cms.untracked.uint32(16)
                                                 )
process.filterFEDBadModuleAllThr = process.filterFEDBadModuleHighThr.clone(badModThr = cms.uint32(35500),wantedHisto=cms.untracked.bool(True))
process.filterFEDBadModuleMidThr = process.filterFEDBadModuleHighThr.clone(badModThr = cms.uint32(7174),wantedHisto=cms.untracked.bool(False))
process.filterFEDBadModuleLowThr = process.filterFEDBadModuleHighThr.clone(badModThr = cms.uint32(2000),wantedHisto=cms.untracked.bool(False))
# Per-FED watchers: histogram events in which the listed detector modules
# (by DetId) report errors. badModThr=0 means "any listed module".
process.badModuleFED74 = cms.EDFilter("FEDBadModuleFilter",
                                      collectionName=cms.InputTag("siStripDigis"),
                                      badModThr = cms.uint32(0),
                                      wantedHisto=cms.untracked.bool(True),
                                      moduleList = cms.untracked.vuint32(369158204),
                                      maxLSBeforeRebin = cms.untracked.uint32(250),
                                      startingLSFraction = cms.untracked.uint32(16)
                                      )
process.badModuleFED102 = cms.EDFilter("FEDBadModuleFilter",
                                       collectionName=cms.InputTag("siStripDigis"),
                                       badModThr = cms.uint32(0),
                                       wantedHisto=cms.untracked.bool(True),
                                       moduleList = cms.untracked.vuint32(369141837,369141833,369141829),
                                       maxLSBeforeRebin = cms.untracked.uint32(250),
                                       startingLSFraction = cms.untracked.uint32(16)
                                       )
process.badModuleFED108 = cms.EDFilter("FEDBadModuleFilter",
                                       collectionName=cms.InputTag("siStripDigis"),
                                       badModThr = cms.uint32(0),
                                       wantedHisto=cms.untracked.bool(True),
                                       moduleList = cms.untracked.vuint32(369142074,369142077,369142078),
                                       maxLSBeforeRebin = cms.untracked.uint32(250),
                                       startingLSFraction = cms.untracked.uint32(16)
                                       )
#process.seqTooManyErrorsAllThr = cms.Sequence(process.logErrorTooManyErrorsAllThr)
#process.seqTooManyErrorsHighThr = cms.Sequence(~process.logErrorTooManyErrorsAllThr + process.logErrorTooManyErrors)
#process.seqTooManyErrorsMidThr = cms.Sequence(~process.logErrorTooManyErrorsAllThr + ~process.logErrorTooManyErrors + process.logErrorTooManyErrorsMidThr)
#process.seqTooManyErrorsLowThr = cms.Sequence(~process.logErrorTooManyErrorsAllThr + ~process.logErrorTooManyErrors + ~process.logErrorTooManyErrorsMidThr +
#                                              process.logErrorTooManyErrorsLowThr)
#process.seqNoTooManyErrors = cms.Sequence(~process.logErrorTooManyErrorsAllThr + ~process.logErrorTooManyErrors + ~process.logErrorTooManyErrorsMidThr +
#                                          ~process.logErrorTooManyErrorsLowThr)
# Mutually exclusive selection sequences: each event falls in exactly one
# threshold class (All > High > Mid > Low > none), enforced by vetoing the
# higher-threshold filters with '~'.
process.seqTooManyErrorsAllThr = cms.Sequence(process.filterFEDBadModuleAllThr)
process.seqTooManyErrorsHighThr = cms.Sequence(~process.filterFEDBadModuleAllThr + process.filterFEDBadModuleHighThr)
process.seqTooManyErrorsMidThr = cms.Sequence(~process.filterFEDBadModuleAllThr + ~process.filterFEDBadModuleHighThr +
                                              process.filterFEDBadModuleMidThr)
process.seqTooManyErrorsLowThr = cms.Sequence(~process.filterFEDBadModuleAllThr + ~process.filterFEDBadModuleHighThr +
                                              ~process.filterFEDBadModuleMidThr +process.filterFEDBadModuleLowThr)
process.seqNoTooManyErrors = cms.Sequence(~process.filterFEDBadModuleAllThr + ~process.filterFEDBadModuleHighThr +
                                          ~process.filterFEDBadModuleMidThr + ~process.filterFEDBadModuleLowThr)
# FED monitoring DQM modules, one clone per threshold class writing into its
# own histogram folder.
process.load("DQM.SiStripCommon.TkHistoMap_cff")
process.load("DQM.SiStripMonitorHardware.siStripFEDMonitor_Tier0_cff")
process.siStripFEDMonitor.BadMajorityInPartitionHistogramConfig.Enabled = True
process.siStripFEDMonitorAllThr = process.siStripFEDMonitor.clone( HistogramFolderName = cms.untracked.string('SiStrip/ReadoutView/FedSummaryAllThr'))
process.siStripFEDMonitorHighThr = process.siStripFEDMonitor.clone( HistogramFolderName = cms.untracked.string('SiStrip/ReadoutView/FedSummaryHighThr'))
process.siStripFEDMonitorMidThr = process.siStripFEDMonitor.clone( HistogramFolderName = cms.untracked.string('SiStrip/ReadoutView/FedSummaryMidThr'))
process.siStripFEDMonitorLowThr = process.siStripFEDMonitor.clone( HistogramFolderName = cms.untracked.string('SiStrip/ReadoutView/FedSummaryLowThr'))
process.siStripFEDMonitorNoErr = process.siStripFEDMonitor.clone( HistogramFolderName = cms.untracked.string('SiStrip/ReadoutView/FedSummaryNoErr'))
# ---- Output, paths and conditions -----------------------------------------
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load("Configuration.EventContent.EventContent_cff")
# DQM-tier output file with the standard DQM event content.
process.DQMoutput = cms.OutputModule("PoolOutputModule",
                                     splitLevel = cms.untracked.int32(0),
                                     outputCommands = process.DQMEventContent.outputCommands,
                                     fileName = cms.untracked.string('manyfederrors_DQM.root'),
                                     dataset = cms.untracked.PSet(
                                         filterName = cms.untracked.string(''),
                                         dataTier = cms.untracked.string('DQM')
                                         )
                                     )
process.endjob_step = cms.EndPath(process.endOfProcess)
process.DQMoutput_step = cms.EndPath(process.DQMoutput)
# p0 runs unconditionally on all events; the other paths run the same
# producers but select a single FED-error threshold class each.
process.p0 = cms.Path(
    process.seqRECO +
    process.seqProducers +
    process.siStripFEDMonitor +
    process.seqEventHistory +
    process.badModuleFED74 +
    process.badModuleFED102 +
    process.badModuleFED108
    )
process.pnomanyfederrors = cms.Path(
    process.seqRECO +
    process.seqProducers +
    process.seqNoTooManyErrors +
    process.siStripFEDMonitorNoErr +
    process.seqEventHistoryNoManyFEDErrors
    )
process.pmanyfederrorslowthr = cms.Path(
    process.seqRECO +
    process.seqProducers +
    process.seqTooManyErrorsLowThr +
    process.siStripFEDMonitorLowThr +
    process.seqEventHistoryManyFEDErrorsLowThr
    )
process.pmanyfederrorsmidthr = cms.Path(
    process.seqRECO +
    process.seqProducers +
    process.seqTooManyErrorsMidThr +
    process.siStripFEDMonitorMidThr +
    process.seqEventHistoryManyFEDErrorsMidThr
    )
process.pmanyfederrorshighthr = cms.Path(
    process.seqRECO +
    process.seqProducers +
    process.seqTooManyErrorsHighThr +
    process.siStripFEDMonitorHighThr +
    process.seqEventHistoryManyFEDErrorsHighThr
    )
process.pmanyfederrorsallthr = cms.Path(
    process.seqRECO +
    process.seqProducers +
    process.seqTooManyErrorsAllThr +
    process.siStripFEDMonitorAllThr +
    process.seqEventHistoryManyFEDErrorsAllThr
    )
#----GlobalTag ------------------------
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
# ROOT file receiving the non-DQM histograms booked via TFileService.
process.TFileService = cms.Service('TFileService',
                                   fileName = cms.string('ManyFEDErrors.root')
                                   )
#print process.dumpPython()
| 58.880399 | 157 | 0.680415 |
21516e9fd09718c5f6e7f9e0328f11d174452960 | 11,901 | py | Python | python/sparkdl/transformers/tf_image.py | arthurtibame/spark-deep-learning | 51cc6c030f3c4752c579aa29836666605477fa1d | [
"Apache-2.0"
] | 1 | 2020-06-29T23:45:08.000Z | 2020-06-29T23:45:08.000Z | python/sparkdl/transformers/tf_image.py | diprup/spark-deep-learning | 51cc6c030f3c4752c579aa29836666605477fa1d | [
"Apache-2.0"
] | null | null | null | python/sparkdl/transformers/tf_image.py | diprup/spark-deep-learning | 51cc6c030f3c4752c579aa29836666605477fa1d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from deprecated import deprecated
import numpy as np
import tensorflow as tf
import tensorframes as tfs # pylint: disable=import-error
from pyspark import Row
from pyspark.ml import Transformer
from pyspark.ml.image import ImageSchema
from pyspark.ml.param import Param, Params
from pyspark.sql.functions import udf
import sparkdl.graph.utils as tfx
import sparkdl.image.imageIO as imageIO
from sparkdl.param import keyword_only, HasInputCol, HasOutputCol, HasOutputMode
from sparkdl.param import SparkDLTypeConverters
import sparkdl.transformers.utils as utils
import sparkdl.utils.jvmapi as JVMAPI
__all__ = ['TFImageTransformer']
IMAGE_INPUT_TENSOR_NAME = tfx.tensor_name(utils.IMAGE_INPUT_PLACEHOLDER_NAME)
USER_GRAPH_NAMESPACE = 'given'
NEW_OUTPUT_PREFIX = 'sdl_flattened'
@deprecated(reason="TFImageTransformer will be removed in the next release of sparkdl. "
"Please use Pandas UDF for distributed model inference.")
class TFImageTransformer(Transformer, HasInputCol, HasOutputCol, HasOutputMode):
"""
Applies the Tensorflow graph to the image column in DataFrame.
Restrictions of the current API:
* Does not use minibatches, which is a major low-hanging fruit for performance.
* Only one output node can be specified.
* The output is expected to be an image or a 1-d vector.
* All images in the dataframe are expected be of the same numerical data type
(i.e. the dtype of the values in the numpy array representation is the same.)
We assume all graphs have a "minibatch" dimension (i.e. an unknown leading
dimension) in the tensor shapes.
.. note:: The input tensorflow graph should have appropriate weights constantified,
since a new session is created inside this transformer.
"""
graph = Param(
Params._dummy(),
"graph",
"A TensorFlow computation graph",
typeConverter=SparkDLTypeConverters.toTFGraph)
inputTensor = Param(
Params._dummy(),
"inputTensor",
"A TensorFlow tensor object or name representing the input image",
typeConverter=SparkDLTypeConverters.toTFTensorName)
outputTensor = Param(
Params._dummy(),
"outputTensor",
"A TensorFlow tensor object or name representing the output",
typeConverter=SparkDLTypeConverters.toTFTensorName)
channelOrder = Param(
Params._dummy(),
"channelOrder",
"Strign specifying the expected color channel order, can be one of L,RGB,BGR",
typeConverter=SparkDLTypeConverters.toChannelOrder)
    @keyword_only
    def __init__(self, channelOrder, inputCol=None, outputCol=None, graph=None,
                 inputTensor=IMAGE_INPUT_TENSOR_NAME, outputTensor=None, outputMode="vector"):
        """
        __init__(self, channelOrder, inputCol=None, outputCol=None, graph=None,
                 inputTensor=IMAGE_INPUT_TENSOR_NAME, outputTensor=None, outputMode="vector")
        :param: channelOrder: specify the ordering of the color channel, can be one of RGB,
                BGR, L (grayscale)
        """
        super(TFImageTransformer, self).__init__()
        # @keyword_only stashed the caller's keyword arguments in
        # self._input_kwargs; forward them all to setParams.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
        self._setDefault(inputTensor=IMAGE_INPUT_TENSOR_NAME)
        # channelOrder is also kept as a plain attribute, not only as a Param.
        self.channelOrder = channelOrder
@keyword_only
def setParams(self, channelOrder=None, inputCol=None, outputCol=None, graph=None,
inputTensor=IMAGE_INPUT_TENSOR_NAME, outputTensor=None, outputMode="vector"):
"""
setParams(self, channelOrder=None, inputCol=None, outputCol=None, graph=None,
inputTensor=IMAGE_INPUT_TENSOR_NAME, outputTensor=None, outputMode="vector")
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def setGraph(self, value):
return self._set(graph=value)
def setInputTensor(self, value):
return self._set(inputTensor=value)
def setOutputTensor(self, value):
return self._set(outputTensor=value)
def getGraph(self):
return self.getOrDefault(self.graph)
def getInputTensor(self):
tensor_name = self.getOrDefault(self.inputTensor)
return self.getGraph().get_tensor_by_name(tensor_name)
def getOutputTensor(self):
tensor_name = self.getOrDefault(self.outputTensor)
return self.getGraph().get_tensor_by_name(tensor_name)
def _transform(self, dataset):
graph = self.getGraph()
composed_graph = self._addReshapeLayers(graph, self._getImageDtype(dataset))
final_graph = self._stripGraph(composed_graph)
with final_graph.as_default(): # pylint: disable=not-context-manager
image = dataset[self.getInputCol()]
image_df_exploded = (dataset
.withColumn("__sdl_image_height", image.height)
.withColumn("__sdl_image_width", image.width)
.withColumn("__sdl_image_nchannels", image.nChannels)
.withColumn("__sdl_image_data", image.data)
) # yapf: disable
final_output_name = self._getFinalOutputTensorName()
output_tensor = final_graph.get_tensor_by_name(final_output_name)
final_df = (
tfs.map_rows([output_tensor], image_df_exploded,
feed_dict={
"height": "__sdl_image_height",
"width": "__sdl_image_width",
"num_channels": "__sdl_image_nchannels",
"image_buffer": "__sdl_image_data"})
.drop("__sdl_image_height", "__sdl_image_width", "__sdl_image_nchannels",
"__sdl_image_data")
) # yapf: disable
tfs_output_name = tfx.op_name(output_tensor, final_graph)
original_output_name = self._getOriginalOutputTensorName()
output_shape = final_graph.get_tensor_by_name(original_output_name).shape
output_mode = self.getOrDefault(self.outputMode)
# TODO: support non-1d tensors (return np.array).
if output_mode == "image":
return self._convertOutputToImage(final_df, tfs_output_name, output_shape)
else:
assert output_mode == "vector", "Unknown output mode: %s" % output_mode
return self._convertOutputToVector(final_df, tfs_output_name)
def _getImageDtype(self, dataset):
# This may not be the best way to get the type of image, but it is one way.
# Assumes that the dtype for all images is the same in the given dataframe.
pdf = dataset.select(self.getInputCol()).take(1)
img = pdf[0][self.getInputCol()]
img_type = imageIO.imageTypeByOrdinal(img.mode)
return img_type.dtype
# TODO: duplicate code, same functionality as sparkdl.graph.pieces.py::builSpImageConverter
# TODO: It should be extracted as a util function and shared
def _addReshapeLayers(self, tf_graph, dtype="uint8"):
input_tensor_name = self.getInputTensor().name
gdef = tf_graph.as_graph_def(add_shapes=True)
g = tf.Graph() # pylint: disable=invalid-name
with g.as_default(): # pylint: disable=not-context-manager
# Flat image data -> image dimensions
height = tf.placeholder(tf.int32, [], name="height")
width = tf.placeholder(tf.int32, [], name="width")
num_channels = tf.placeholder(tf.int32, [], name="num_channels")
image_buffer = tf.placeholder(tf.string, [], name="image_buffer")
# Note: the shape argument is required for tensorframes as it uses a
# slightly older version of tensorflow.
shape_tensor = tf.stack([height, width, num_channels], axis=0)
shape = tf.reshape(shape_tensor, shape=(3, ), name='shape')
if dtype == "uint8":
image_uint8 = tf.decode_raw(image_buffer, tf.uint8, name="decode_raw")
image_float = tf.to_float(image_uint8)
else:
assert dtype == "float32", "Unsupported dtype for image: %s" % dtype
image_float = tf.decode_raw(image_buffer, tf.float32, name="decode_raw")
image_reshaped = tf.reshape(image_float, shape, name="reshaped")
image_reshaped = imageIO.fixColorChannelOrdering(self.channelOrder, image_reshaped)
image_reshaped_expanded = tf.expand_dims(image_reshaped, 0, name="expanded")
# Add on the original graph
tf.import_graph_def(
gdef,
input_map={input_tensor_name: image_reshaped_expanded},
return_elements=[self.getOutputTensor().name],
name=USER_GRAPH_NAMESPACE)
# Flatten the output for tensorframes
output_node = g.get_tensor_by_name(self._getOriginalOutputTensorName())
_ = tf.reshape(output_node[0], shape=[-1], name=self._getFinalOutputOpName())
return g
# Sometimes the tf graph contains a bunch of stuff that doesn't lead to the
# output. TensorFrames does not like that, so we strip out the parts that
# are not necessary for the computation at hand.
def _stripGraph(self, tf_graph):
gdef = tfx.strip_and_freeze_until([self._getFinalOutputOpName()], tf_graph)
g = tf.Graph() # pylint: disable=invalid-name
with g.as_default(): # pylint: disable=not-context-manager
tf.import_graph_def(gdef, name='')
return g
def _getOriginalOutputTensorName(self):
return USER_GRAPH_NAMESPACE + '/' + self.getOutputTensor().name
def _getFinalOutputTensorName(self):
return NEW_OUTPUT_PREFIX + '_' + self.getOutputTensor().name
def _getFinalOutputOpName(self):
return tfx.op_name(self._getFinalOutputTensorName())
def _convertOutputToImage(self, df, tfs_output_col, output_shape):
assert len(output_shape) == 4, str(output_shape) + " does not have 4 dimensions"
height = int(output_shape[1])
width = int(output_shape[2])
def to_image(orig_image, numeric_data):
# Assume the returned image has float pixels but same #channels as input
mode = imageIO.imageTypeByName('CV_32FC%d' % orig_image.nChannels)
data = bytearray(np.array(numeric_data).astype(np.float32).tobytes())
nChannels = orig_image.nChannels
return Row(
origin="",
mode=mode.ord,
height=height,
width=width,
nChannels=nChannels,
data=data)
to_image_udf = udf(to_image, ImageSchema.imageSchema['image'].dataType)
resDf = df.withColumn(self.getOutputCol(),
to_image_udf(df[self.getInputCol()], df[tfs_output_col]))
return resDf.drop(tfs_output_col)
def _convertOutputToVector(self, df, tfs_output_col):
"""
Converts the output python list to MLlib Vector.
"""
return df\
.withColumn(self.getOutputCol(), JVMAPI.listToMLlibVectorUDF(df[tfs_output_col]))\
.drop(tfs_output_col)
| 45.423664 | 95 | 0.659188 |
2e09cfa56b53db6721fbed3306045c7d62615215 | 2,283 | py | Python | felpy/backend/wpg_converters.py | twguest/FELPy | 90ed49cc4bddd2188f3284168152d043144ae1fb | [
"Apache-2.0"
] | null | null | null | felpy/backend/wpg_converters.py | twguest/FELPy | 90ed49cc4bddd2188f3284168152d043144ae1fb | [
"Apache-2.0"
] | null | null | null | felpy/backend/wpg_converters.py | twguest/FELPy | 90ed49cc4bddd2188f3284168152d043144ae1fb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from felpy.model.wavefront import Wavefront
def complex_to_wpg(arr): ### converter
    """
    Convert a complex wavefield into a WPG-style electric field array.

    :param arr: complex wavefield array [x,y,t] (complex128 type)
    :returns: wpg style electric field array [nx,ny,nz,2] (float64), where the
        last axis holds the real part at index 0 and the imaginary part at 1
    """
    nx, ny, nt = arr.shape[0], arr.shape[1], arr.shape[2]
    # float64 output regardless of the input's precision, matching WPG's layout
    wpg_field = np.zeros([nx, ny, nt, 2])
    wpg_field[..., 0] = np.real(arr)
    wpg_field[..., 1] = np.imag(arr)
    return wpg_field
def wavefront_from_array(cfr,nx,ny,nz,dx,dy,dz,ekev, pulse_duration = 40e-15, sigma = 4, **kwargs):
    """
    function to produce a wpg wavefront object instance from a complex valued
    wavefront definition

    :param cfr: complex valued wavefield array [x,y,t] (complex128)
    :param nx: number of pixels along x-axis
    :param ny: number of pixels along y-axis
    :param nz: number of pixels along z-axis
    :param dx: pixels size along x-axis
    :param dy: pixels size along y-axis
    :param dz: pixels size along z-axis
    :param ekev: energy in kev
    :param pulse_duration: duration of the pulse (fwhm) in seconds, defaults to 40e-15
    :param sigma: integer number of width multiples over which time-axis should be defined, defaults to 4
    :param kwargs: extra entries merged into the wavefront's custom_fields dict
    :returns: frequency-domain Wavefront built from *cfr*
    """
    # Initialize empty wavefront.
    wfr = Wavefront()
    # Setup E-field.
    # NOTE(review): arrEhor is zero-filled here and then fully overwritten by
    # complex_to_wpg(cfr) below; only arrEver actually stays zero.
    wfr.data.arrEhor = np.zeros(shape=(nx, ny, nz, 2))
    wfr.data.arrEver = np.zeros(shape=(nx, ny, nz, 2))
    wfr.params.wEFieldUnit = 'sqrt(W/mm^2)'
    wfr.params.photonEnergy = ekev * 1000  # keV -> eV
    wfr.params.wDomain = 'time'
    wfr.params.Mesh.nSlices = nz
    wfr.params.Mesh.nx = nx
    wfr.params.Mesh.ny = ny
    # Time window is sigma pulse-durations wide, centred on zero.
    wfr.params.Mesh.sliceMin = -pulse_duration*sigma / 2.
    wfr.params.Mesh.sliceMax = pulse_duration*sigma / 2.
    # Spatial extent, centred on the optical axis.
    range_x = dx*nx
    range_y = dy*ny
    wfr.params.Mesh.xMin = -range_x / 2.
    wfr.params.Mesh.xMax = range_x / 2.
    wfr.params.Mesh.yMin = -range_y / 2.
    wfr.params.Mesh.yMax = range_y / 2.
    # NOTE(review): Rx/Ry look like placeholder wavefront-curvature radii
    # (2 m and 1 m) — confirm intended values against the Wavefront API.
    wfr.params.Rx = 2
    wfr.params.Ry = 1
    # Load the actual field data (real/imag packed on the last axis).
    wfr.data.arrEhor = complex_to_wpg(cfr)
    # Switch to the frequency-domain representation.
    wfr.set_electric_field_representation('f')
    wfr.custom_fields.update(**kwargs)
    return wfr
c3ddd993b2b9eb359c85c7ca7ea5971fb33df643 | 635 | py | Python | figures/Dots.py | damirmardanov/figure-generator | 1ed7ee6d423f2d7392e95529336b8f2d35ea65fd | [
"MIT"
] | null | null | null | figures/Dots.py | damirmardanov/figure-generator | 1ed7ee6d423f2d7392e95529336b8f2d35ea65fd | [
"MIT"
] | null | null | null | figures/Dots.py | damirmardanov/figure-generator | 1ed7ee6d423f2d7392e95529336b8f2d35ea65fd | [
"MIT"
] | null | null | null | import numpy.random as rnd
import random
from matplotlib.patches import Circle
from figures.Figure import Figures
class DotsBuilder(Figures):
    """Figure builder that produces randomly placed and sized dots (circles)."""

    @staticmethod
    def generate(xy, count=0):
        """Return *count* Circle patches at random positions/radii.

        NOTE(review): the *xy* argument is dead — it is overwritten by a fresh
        random position on every loop iteration, so callers' values are ignored.
        """
        elements = []
        for i in range(count):
            # Random centre within the (2.8, 7.2) square and a small random radius.
            xy = rnd.uniform(2.8, 7.2, 2)
            radius = rnd.uniform(0.01, 0.1)
            elements.append(Circle(xy=xy, radius=radius))
        return elements

    def generate_random(self, count=0):
        """Return a random number (3-10) of dots when *count* is 0, else *count* dots."""
        xy = rnd.uniform(2.8, 7.2, 2)
        if count == 0:
            count = random.randint(3, 10)
        return self.generate(xy, count=count)
| 23.518519 | 58 | 0.577953 |
01ea7f5ec8560871513426399bd579a41dd7e242 | 1,087 | py | Python | SampleEnvSchema142/ArrayByte.py | ScreamingUdder/python-streaming-deserialisation-tools | 9410445ca0f517c904745bd211af05cff40dec5d | [
"BSD-2-Clause"
] | null | null | null | SampleEnvSchema142/ArrayByte.py | ScreamingUdder/python-streaming-deserialisation-tools | 9410445ca0f517c904745bd211af05cff40dec5d | [
"BSD-2-Clause"
] | null | null | null | SampleEnvSchema142/ArrayByte.py | ScreamingUdder/python-streaming-deserialisation-tools | 9410445ca0f517c904745bd211af05cff40dec5d | [
"BSD-2-Clause"
] | null | null | null | # automatically generated, do not modify
# namespace:
import flatbuffers
# NOTE: flatbuffers-generated accessor class — regenerate from the schema
# rather than editing by hand.
class ArrayByte(object):
    __slots__ = ['_tab']

    # ArrayByte
    def Init(self, buf, pos):
        # Attach this accessor to the serialized buffer at the table's offset.
        self._tab = flatbuffers.table.Table(buf, pos)

    # ArrayByte
    def Value(self, j):
        # Element j of the int8 'value' vector; 0 if the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # ArrayByte
    def ValueLength(self):
        # Length of the 'value' vector; 0 if the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
# flatbuffers-generated builder helpers for serializing an ArrayByte table.
def ArrayByteStart(builder): builder.StartObject(1)
def ArrayByteAddValue(builder, value): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def ArrayByteStartValueVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def ArrayByteEnd(builder): return builder.EndObject()
bdd60322cd578ec69ca349f5076338adfba9ad1a | 11,624 | py | Python | tests/inference/qmhl_loss_test.py | zaqqwerty/qhbm-library | a4886adf91a3e2e2bc6f6b07a6e13ffe8cf0aa2e | [
"Apache-2.0"
] | 19 | 2022-02-17T19:32:36.000Z | 2022-03-31T01:07:19.000Z | tests/inference/qmhl_loss_test.py | zaqqwerty/qhbm-library | a4886adf91a3e2e2bc6f6b07a6e13ffe8cf0aa2e | [
"Apache-2.0"
] | 37 | 2022-02-17T17:26:01.000Z | 2022-03-31T21:32:16.000Z | tests/inference/qmhl_loss_test.py | zaqqwerty/qhbm-library | a4886adf91a3e2e2bc6f6b07a6e13ffe8cf0aa2e | [
"Apache-2.0"
] | 8 | 2022-02-17T17:26:32.000Z | 2022-03-18T12:11:33.000Z | # Copyright 2021 The QHBM Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for qhbmlib.inference.qmhl_loss"""
import functools
import math
import cirq
import sympy
import tensorflow as tf
import tensorflow_probability as tfp
from qhbmlib import data
from qhbmlib import inference
from qhbmlib import models
from tests import test_util
class QMHLTest(tf.test.TestCase):
  """Tests for the QMHL loss and gradients."""

  def setUp(self):
    """Initializes test objects."""
    super().setUp()
    # Qubit counts to sweep in each test.
    self.num_qubits_list = [1, 2]
    self.tf_random_seed = 4
    self.tf_random_seed_alt = 7
    self.tfp_seed = tf.constant([3, 6], tf.int32)
    # TODO(#190)
    self.num_samples = int(1e6)
    # Tolerances for sampled estimates.
    self.close_rtol = 2e-2
    self.zero_atol = 2e-3
    self.not_zero_atol = 2e-3

  @test_util.eager_mode_toggle
  def test_self_qmhl(self):
    """Confirms known value of the QMHL loss of a model against itself."""
    num_layers = 1
    qmhl_wrapper = tf.function(inference.qmhl)
    for num_qubits in self.num_qubits_list:
      qubits = cirq.GridQubit.rect(1, num_qubits)
      data_h, data_infer = test_util.get_random_hamiltonian_and_inference(
          qubits, num_layers, f"data_objects_{num_qubits}", self.num_samples)
      model_h, model_infer = test_util.get_random_hamiltonian_and_inference(
          qubits,
          num_layers,
          f"hamiltonian_objects_{num_qubits}",
          self.num_samples,
          initializer_seed=self.tf_random_seed)
      # Set data equal to the model
      data_h.set_weights(model_h.get_weights())
      actual_data = data.QHBMData(data_infer)

      # Trained loss is the entropy.
      expected_loss = model_infer.e_inference.entropy()
      # Since this is the optimum, derivatives should all be zero.
      expected_loss_derivative = [
          tf.zeros_like(v) for v in model_h.trainable_variables
      ]

      with tf.GradientTape() as tape:
        actual_loss = qmhl_wrapper(actual_data, model_infer)
      actual_loss_derivative = tape.gradient(actual_loss,
                                             model_h.trainable_variables)

      self.assertAllClose(actual_loss, expected_loss, rtol=self.close_rtol)
      self.assertAllClose(
          actual_loss_derivative, expected_loss_derivative, atol=self.zero_atol)

  @test_util.eager_mode_toggle
  def test_hamiltonian_qmhl(self):
    """Tests derivatives of QMHL with respect to the model."""

    # TODO(#171): Delta function seems generalizable.
    def delta_qmhl(k, var, actual_data, model_qhbm, delta):
      """Calculates the qmhl loss with the kth entry of `var` perturbed."""
      num_elts = tf.size(var)
      old_value = var.read_value()
      var.assign(old_value + delta * tf.one_hot(k, num_elts, 1.0, 0.0))
      delta_loss = inference.qmhl(actual_data, model_qhbm)
      var.assign(old_value)
      return delta_loss

    qmhl_wrapper = tf.function(inference.qmhl)

    for num_qubits in self.num_qubits_list:
      qubits = cirq.GridQubit.rect(1, num_qubits)
      num_layers = 2
      # Data and model use different initializer seeds so they differ.
      _, data_qhbm = test_util.get_random_hamiltonian_and_inference(
          qubits,
          num_layers,
          f"data_objects_{num_qubits}",
          self.num_samples,
          initializer_seed=self.tf_random_seed,
          ebm_seed=self.tfp_seed)
      actual_data = data.QHBMData(data_qhbm)

      model_h, model_qhbm = test_util.get_random_hamiltonian_and_inference(
          qubits,
          num_layers,
          f"model_objects_{num_qubits}",
          self.num_samples,
          initializer_seed=self.tf_random_seed_alt,
          ebm_seed=self.tfp_seed)
      # Make sure variables are trainable
      self.assertGreater(len(model_h.trainable_variables), 1)
      with tf.GradientTape() as tape:
        actual_loss = qmhl_wrapper(actual_data, model_qhbm)
      actual_derivative = tape.gradient(actual_loss,
                                        model_h.trainable_variables)

      # Compare autodiff gradients against finite-difference estimates.
      expected_derivative = test_util.approximate_gradient(
          functools.partial(qmhl_wrapper, actual_data, model_qhbm),
          model_h.trainable_variables)

      # Changing model parameters is working if finite difference derivatives
      # are non-zero.  Also confirms that model_h and data_h are different.
      tf.nest.map_structure(
          lambda x: self.assertAllGreater(tf.abs(x), self.not_zero_atol),
          expected_derivative)
      self.assertAllClose(
          actual_derivative, expected_derivative, rtol=self.close_rtol)

  def test_loss_value_x_rot(self):
    """Confirms correct values for a single qubit X rotation QHBM.

    We use a data state which is a Y rotation of an initially diagonal density
    operator.  The QHBM is a Bernoulli latent state with X rotation QNN.

    See the colab notebook at the following link for derivations:
    https://colab.research.google.com/drive/14987JCMju_8AVvvVoojwe6hA7Nlw-Dhe?usp=sharing

    Since each qubit is independent, the loss is the sum over the individual
    qubit losses, and the gradients are the the per-qubit gradients.
    """
    ebm_const = 1.0
    q_const = math.pi
    for num_qubits in self.num_qubits_list:
      # EBM
      ebm_init = tf.keras.initializers.RandomUniform(
          minval=ebm_const / 4, maxval=ebm_const, seed=self.tf_random_seed)
      actual_energy = models.BernoulliEnergy(list(range(num_qubits)), ebm_init)
      e_infer = inference.BernoulliEnergyInference(
          actual_energy, self.num_samples, initial_seed=self.tfp_seed)

      # QNN: one X rotation per qubit.
      qubits = cirq.GridQubit.rect(1, num_qubits)
      r_symbols = [sympy.Symbol(f"phi_{n}") for n in range(num_qubits)]
      r_circuit = cirq.Circuit(
          cirq.rx(r_s)(q) for r_s, q in zip(r_symbols, qubits))
      qnn_init = tf.keras.initializers.RandomUniform(
          minval=q_const / 4, maxval=q_const, seed=self.tf_random_seed)
      actual_circuit = models.DirectQuantumCircuit(r_circuit, qnn_init)
      q_infer = inference.AnalyticQuantumInference(actual_circuit)
      qhbm_infer = inference.QHBM(e_infer, q_infer)
      model = qhbm_infer.modular_hamiltonian

      # Confirm qhbm_model QHBM: log partition matches the closed form
      # sum_i log(2 cosh(theta_i)).
      test_thetas = model.energy.trainable_variables[0]
      test_phis = model.circuit.trainable_variables[0]
      with tf.GradientTape() as log_partition_tape:
        actual_log_partition = qhbm_infer.e_inference.log_partition()
      expected_log_partition = tf.reduce_sum(
          tf.math.log(2 * tf.math.cosh(test_thetas)))
      self.assertAllClose(
          actual_log_partition, expected_log_partition, rtol=self.close_rtol)
      # Confirm qhbm_model modular Hamiltonian for 1 qubit case
      if num_qubits == 1:
        actual_dm = inference.density_matrix(model)
        actual_log_dm = tf.linalg.logm(actual_dm)
        actual_ktp = -actual_log_dm - tf.eye(
            2, dtype=tf.complex64) * tf.cast(actual_log_partition, tf.complex64)

        # Analytic 2x2 modular Hamiltonian for a single X-rotated Bernoulli.
        a = (test_thetas[0] * tf.math.cos(test_phis[0])).numpy() + 0j
        b = 1j * (test_thetas[0] * tf.math.sin(test_phis[0])).numpy()
        c = -1j * (test_thetas[0] * tf.math.sin(test_phis[0])).numpy()
        d = -(test_thetas[0] * tf.math.cos(test_phis[0])).numpy() + 0j
        expected_ktp = tf.constant([[a, b], [c, d]], dtype=tf.complex64)

        self.assertAllClose(actual_ktp, expected_ktp, rtol=self.close_rtol)

      # Build target data: Y-rotations of a diagonal density operator.
      alphas = tf.random.uniform([num_qubits], -q_const, q_const, tf.float32,
                                 self.tf_random_seed)
      y_rot = cirq.Circuit(
          cirq.ry(r.numpy())(q) for r, q in zip(alphas, qubits))
      data_circuit = models.DirectQuantumCircuit(y_rot)
      data_q_infer = inference.AnalyticQuantumInference(data_circuit)
      data_probs = tf.random.uniform([num_qubits],
                                     dtype=tf.float32,
                                     seed=self.tf_random_seed)
      data_samples = tfp.distributions.Bernoulli(
          probs=1 - data_probs, dtype=tf.int8).sample(
              self.num_samples, seed=self.tfp_seed)

      # Load target data into a QuantumData class
      class FixedData(data.QuantumData):
        """Contains a fixed quantum data set."""

        def __init__(self, samples, q_infer):
          """Initializes a FixedData."""
          self.samples = samples
          self.q_infer = q_infer

        def expectation(self, observable):
          """Averages over the fixed quantum data set."""
          raw_expectations = self.q_infer.expectation(self.samples, observable)
          return tf.math.reduce_mean(raw_expectations)

      actual_data = FixedData(data_samples, data_q_infer)
      qmhl_wrapper = tf.function(inference.qmhl)
      with tf.GradientTape() as loss_tape:
        actual_loss = qmhl_wrapper(actual_data, qhbm_infer)
      # TODO(zaqqwerty): add way to use a log QHBM as observable on states
      expected_expectation = tf.reduce_sum(test_thetas * (2 * data_probs - 1) *
                                           tf.math.cos(alphas) *
                                           tf.math.cos(test_phis))
      with tf.GradientTape() as expectation_tape:
        actual_expectation = actual_data.expectation(
            qhbm_infer.modular_hamiltonian)
      self.assertAllClose(actual_expectation, expected_expectation,
                          self.close_rtol)

      # QMHL loss = <H> + log Z.
      expected_loss = expected_expectation + expected_log_partition
      self.assertAllClose(actual_loss, expected_loss, rtol=self.close_rtol)

      expected_log_partition_grad = tf.math.tanh(test_thetas)
      actual_log_partition_grad = log_partition_tape.gradient(
          actual_log_partition, test_thetas)
      self.assertAllClose(
          actual_log_partition_grad,
          expected_log_partition_grad,
          rtol=self.close_rtol)

      # Analytic gradients of the expectation term w.r.t. thetas and phis.
      expected_expectation_thetas_grad = (
          2 * data_probs - 1) * tf.math.cos(alphas) * tf.math.cos(test_phis)
      expected_expectation_phis_grad = -test_thetas * (
          2 * data_probs - 1) * tf.math.cos(alphas) * tf.math.sin(test_phis)
      (actual_expectation_thetas_grad,
       actual_expectation_phis_grad) = expectation_tape.gradient(
           actual_expectation, (test_thetas, test_phis))
      self.assertAllClose(
          actual_expectation_thetas_grad,
          expected_expectation_thetas_grad,
          rtol=self.close_rtol)
      self.assertAllClose(
          actual_expectation_phis_grad,
          expected_expectation_phis_grad,
          rtol=self.close_rtol)

      actual_thetas_grads, actual_phis_grads = loss_tape.gradient(
          actual_loss, (test_thetas, test_phis))
      expected_thetas_grads = (
          expected_expectation_thetas_grad + expected_log_partition_grad)
      expected_phis_grads = expected_expectation_phis_grad
      self.assertAllClose(
          actual_thetas_grads, expected_thetas_grads, rtol=self.close_rtol)
      self.assertAllClose(
          actual_phis_grads, expected_phis_grads, rtol=self.close_rtol)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
  print("Running qmhl_loss_test.py ...")
  tf.test.main()
| 41.81295 | 89 | 0.678768 |
859b921d912edbaee5dddf15826f6e4ffc5e7355 | 6,668 | py | Python | tests/flytekit/unit/core/test_workflows.py | frsann/flytekit | 6c032035563ae645b0b93558b3fe3362080057ea | [
"Apache-2.0"
] | null | null | null | tests/flytekit/unit/core/test_workflows.py | frsann/flytekit | 6c032035563ae645b0b93558b3fe3362080057ea | [
"Apache-2.0"
] | null | null | null | tests/flytekit/unit/core/test_workflows.py | frsann/flytekit | 6c032035563ae645b0b93558b3fe3362080057ea | [
"Apache-2.0"
] | null | null | null | import typing
from collections import OrderedDict
import pytest
from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException
from flytekit.common.translator import get_serializable
from flytekit.core import context_manager
from flytekit.core.condition import conditional
from flytekit.core.context_manager import Image, ImageConfig
from flytekit.core.task import task
from flytekit.core.workflow import WorkflowFailurePolicy, WorkflowMetadata, WorkflowMetadataDefaults, workflow
# Shared fixtures: a dummy container image and the serialization settings used
# by every get_serializable() call in this module.
default_img = Image(name="default", fqn="test", tag="tag")
serialization_settings = context_manager.SerializationSettings(
    project="project",
    domain="domain",
    version="version",
    env=None,
    image_config=ImageConfig(default_image=default_img, images=[default_img]),
)
def test_metadata_values():
    """WorkflowMetadata rejects non-enum values and stores valid failure policies."""
    with pytest.raises(FlyteValidationException):
        WorkflowMetadata(on_failure=0)

    wm = WorkflowMetadata(on_failure=WorkflowFailurePolicy.FAIL_IMMEDIATELY)
    assert wm.on_failure == WorkflowFailurePolicy.FAIL_IMMEDIATELY


def test_default_metadata_values():
    """WorkflowMetadataDefaults rejects non-bool values and stores interruptible."""
    with pytest.raises(FlyteValidationException):
        WorkflowMetadataDefaults(3)

    wm = WorkflowMetadataDefaults(interruptible=False)
    assert wm.interruptible is False
def test_workflow_values():
    """Workflow decorator options propagate into the serialized template."""

    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        a = a + 2
        return a, "world-" + str(a)

    @workflow(interruptible=True, failure_policy=WorkflowFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE)
    def wf(a: int) -> (str, str):
        x, y = t1(a=a)
        u, v = t1(a=x)
        return y, v

    wf_spec = get_serializable(OrderedDict(), serialization_settings, wf)
    assert wf_spec.template.metadata_defaults.interruptible
    # FAIL_AFTER_EXECUTABLE_NODES_COMPLETE serializes as enum value 1.
    assert wf_spec.template.metadata.on_failure == 1
def test_default_values():
    """Workflow input defaults are applied when the argument is omitted."""

    @task
    def t() -> bool:
        return True

    @task
    def f() -> bool:
        return False

    @workflow
    def wf(a: bool = True) -> bool:
        # Branch on the (possibly defaulted) boolean input.
        return conditional("bool").if_(a.is_true()).then(t()).else_().then(f())  # type: ignore

    assert wf() is True
    assert wf(a=False) is False
def test_list_output_wf():
    """A workflow can collect task outputs into a list-typed output."""

    @task
    def t1(a: int) -> int:
        a = a + 5
        return a

    @workflow
    def list_output_wf() -> typing.List[int]:
        v = []
        for i in range(2):
            v.append(t1(a=i))
        return v

    x = list_output_wf()
    assert x == [5, 6]
def test_sub_wf_single_named_tuple():
    """NamedTuple outputs with a single field pass through sub-workflows."""
    nt = typing.NamedTuple("SingleNamedOutput", named1=int)

    @task
    def t1(a: int) -> nt:
        a = a + 2
        return nt(a)

    @workflow
    def subwf(a: int) -> nt:
        return t1(a=a)

    @workflow
    def wf(b: int) -> nt:
        # Feed the sub-workflow's named output into another task.
        out = subwf(a=b)
        return t1(a=out.named1)

    x = wf(b=3)
    assert x == (7,)


def test_sub_wf_multi_named_tuple():
    """NamedTuple outputs with multiple fields pass through sub-workflows."""
    nt = typing.NamedTuple("Multi", named1=int, named2=int)

    @task
    def t1(a: int) -> nt:
        a = a + 2
        return nt(a, a)

    @workflow
    def subwf(a: int) -> nt:
        return t1(a=a)

    @workflow
    def wf(b: int) -> nt:
        out = subwf(a=b)
        return t1(a=out.named1)

    x = wf(b=3)
    assert x == (7, 7)
def test_unexpected_outputs():
    """Mismatches between declared and actual workflow outputs raise errors."""

    @task
    def t1(a: int) -> int:
        a = a + 5
        return a

    @workflow
    def no_outputs_wf():
        return t1(a=3)

    # Should raise an exception because the workflow returns something when it shouldn't
    with pytest.raises(FlyteValueException):
        no_outputs_wf()

    # Should raise an exception because it doesn't return something when it should
    with pytest.raises(AssertionError):

        @workflow
        def one_output_wf() -> int:  # noqa
            t1(a=3)


def test_wf_no_output():
    """A workflow with no declared outputs executes and returns None."""

    @task
    def t1(a: int) -> int:
        a = a + 5
        return a

    @workflow
    def no_outputs_wf():
        t1(a=3)

    assert no_outputs_wf() is None
def test_wf_nested_comp():
    """A workflow defined inside another workflow serializes as a sub-workflow."""

    @task
    def t1(a: int) -> int:
        a = a + 5
        return a

    @workflow
    def outer() -> typing.Tuple[int, int]:
        # You should not do this. This is just here for testing.
        @workflow
        def wf2() -> int:
            return t1(a=5)

        return t1(a=3), wf2()

    assert (8, 10) == outer()
    entity_mapping = OrderedDict()
    model_wf = get_serializable(entity_mapping, serialization_settings, outer)
    assert len(model_wf.template.interface.outputs) == 2
    assert len(model_wf.template.nodes) == 2
    # The second node references the nested workflow.
    assert model_wf.template.nodes[1].workflow_node is not None
    sub_wf = model_wf.sub_workflows[0]
    assert len(sub_wf.nodes) == 1
    assert sub_wf.nodes[0].id == "wf2-n0"
    assert sub_wf.nodes[0].task_node.reference_id.name == "test_workflows.t1"
# Module-level task/workflow fixtures shared by the remaining tests.
# NOTE: intentionally left without docstrings — flytekit turns docstrings into
# serialized interface descriptions, which later assertions depend on.
@task
def add_5(a: int) -> int:
    a = a + 5
    return a


@workflow
def simple_wf() -> int:
    return add_5(a=1)
# NOTE: this docstring is parsed by flytekit into the workflow interface and is
# asserted verbatim by test_wf_docstring — do not edit it casually.
@workflow
def my_wf_example(a: int) -> typing.Tuple[int, int]:
    """example

    Workflows can have inputs and return outputs of simple or complex types.

    :param a: input a
    :return: outputs
    """

    x = add_5(a=a)

    # You can use outputs of a previous task as inputs to other nodes.
    z = add_5(a=x)

    # You can call other workflows from within this workflow
    d = simple_wf()

    # You can add conditions that can run on primitive types and execute different branches
    e = conditional("bool").if_(a == 5).then(add_5(a=d)).else_().then(add_5(a=z))

    # Outputs of the workflow have to be outputs returned by prior nodes.
    # No outputs and single or multiple outputs are supported
    return x, e
def test_all_node_types():
    """my_wf_example exercises task, sub-workflow, and branch nodes at once."""
    assert my_wf_example(a=1) == (6, 16)
    entity_mapping = OrderedDict()
    model_wf = get_serializable(entity_mapping, serialization_settings, my_wf_example)
    assert len(model_wf.template.interface.outputs) == 2
    assert len(model_wf.template.nodes) == 4
    # Node 2 is the call to simple_wf, serialized as a workflow node.
    assert model_wf.template.nodes[2].workflow_node is not None

    sub_wf = model_wf.sub_workflows[0]
    assert len(sub_wf.nodes) == 1
    assert sub_wf.nodes[0].id == "n0"
    assert sub_wf.nodes[0].task_node.reference_id.name == "test_workflows.add_5"


def test_wf_docstring():
    """Docstring :param/:return text becomes interface descriptions."""
    model_wf = get_serializable(OrderedDict(), serialization_settings, my_wf_example)
    assert len(model_wf.template.interface.outputs) == 2
    assert model_wf.template.interface.outputs["o0"].description == "outputs"
    assert model_wf.template.interface.outputs["o1"].description == "outputs"
    assert len(model_wf.template.interface.inputs) == 1
    assert model_wf.template.interface.inputs["a"].description == "input a"
| 25.745174 | 110 | 0.657618 |
a75c94205ca5f6a9ad5dbf0bd477f45d6852ca87 | 1,119 | py | Python | shoobsblog/users/tests/test_forms.py | mattshoobs/AgileStoryboard | 80f14192702330c51b2ff130b25b4bc6d0c326a0 | [
"Apache-1.1"
] | null | null | null | shoobsblog/users/tests/test_forms.py | mattshoobs/AgileStoryboard | 80f14192702330c51b2ff130b25b4bc6d0c326a0 | [
"Apache-1.1"
] | null | null | null | shoobsblog/users/tests/test_forms.py | mattshoobs/AgileStoryboard | 80f14192702330c51b2ff130b25b4bc6d0c326a0 | [
"Apache-1.1"
] | null | null | null | import pytest
from shoobsblog.users.forms import UserCreationForm
from shoobsblog.users.tests.factories import UserFactory
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
    """Tests for the custom UserCreationForm."""

    def test_clean_username(self):
        """The form accepts a new username and rejects a duplicate one."""
        # A user with proto_user params does not exist yet.
        proto_user = UserFactory.build()

        form = UserCreationForm(
            {
                "username": proto_user.username,
                "password1": proto_user._password,
                "password2": proto_user._password,
            }
        )

        assert form.is_valid()
        assert form.clean_username() == proto_user.username

        # Creating a user.
        form.save()

        # The user with proto_user params already exists,
        # hence cannot be created.
        form = UserCreationForm(
            {
                "username": proto_user.username,
                "password1": proto_user._password,
                "password2": proto_user._password,
            }
        )

        assert not form.is_valid()
        assert len(form.errors) == 1
        assert "username" in form.errors
| 27.292683 | 59 | 0.595174 |
68cf4b5a4056e02ad1b141e38907bfb306a13b0d | 328 | py | Python | auto_erpnext/config/docs.py | faztp12/auto_erpnext | a38f28cb3c4bae541426bd7375a68d01bb360d66 | [
"MIT"
] | 2 | 2020-12-28T16:34:34.000Z | 2021-01-15T13:35:44.000Z | auto_erpnext/config/docs.py | faztp12/auto_erpnext | a38f28cb3c4bae541426bd7375a68d01bb360d66 | [
"MIT"
] | null | null | null | auto_erpnext/config/docs.py | faztp12/auto_erpnext | a38f28cb3c4bae541426bd7375a68d01bb360d66 | [
"MIT"
] | 4 | 2018-12-20T10:50:41.000Z | 2020-09-13T16:46:58.000Z | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/auto_erpnext"
# docs_base_url = "https://[org_name].github.io/auto_erpnext"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
    """Inject docs-site template variables into the given context object."""
    context.brand_html = "Auto Erpnext"
| 27.333333 | 68 | 0.731707 |
1c3c16f2f9c12a308f80775d111741f371af1209 | 1,044 | py | Python | python/bitcoin_101/bitcoin_client.py | fcracker79/python_bitcoin_101 | 3af1b1e1b817d9676981f3c6c87d93064ac8febe | [
"MIT"
] | 1 | 2018-09-26T18:10:36.000Z | 2018-09-26T18:10:36.000Z | python/bitcoin_101/bitcoin_client.py | fcracker79/python_bitcoin_101 | 3af1b1e1b817d9676981f3c6c87d93064ac8febe | [
"MIT"
] | null | null | null | python/bitcoin_101/bitcoin_client.py | fcracker79/python_bitcoin_101 | 3af1b1e1b817d9676981f3c6c87d93064ac8febe | [
"MIT"
] | null | null | null | import base64
from bitcoin_101 import bitjson_dumps, bitjson_loads
import requests
class BitcoinClient:
    """Minimal JSON-RPC client for a local bitcoind (testnet port 18332).

    Any attribute access becomes an RPC method call, e.g.
    ``client.getblockcount()`` issues the ``getblockcount`` RPC.
    """

    def __init__(self):
        # NOTE(review): credentials are hardcoded; consider reading them from
        # configuration or the environment instead.
        btcd_auth_header = b'Basic ' + base64.b64encode(b'bitcoin:qwertyuiop')
        self.btcd_headers = {'content-type': 'application/json', 'Authorization': btcd_auth_header}

    def __getattr__(self, item):
        # Python only invokes __getattr__ when normal attribute lookup fails;
        # since _call is a defined method, this branch is effectively dead code.
        if item == '_call':
            return self._call
        # Turn any unknown attribute into an RPC invocation of that name.
        return lambda *a, **kw: self._call(item, *a, **kw)

    def _call(self, method: str, *params, expect_json: bool=True):
        """Issue a JSON-RPC call; return the 'result' field (or raw text).

        Raises ValueError when the server reports an error.
        """
        payload = bitjson_dumps({
            'method': method,
            'params': params,
            'jsonrpc': '2.0',
            'id': 0,
        })
        resp = requests.post('http://localhost:18332', data=payload, headers=self.btcd_headers)
        content = resp.text
        if expect_json:
            content = bitjson_loads(content)
            if content['error']:
                raise ValueError('Error: {}'.format(content['error']))
            return content['result']
        return content
5282bca7fce8729741798cb0dce38a5acd071213 | 1,220 | py | Python | data/youtube/download.py | tagny/iLID | 38f5dcae0dc84fd9b78e170748aa38cd8f524c70 | [
"MIT"
] | 90 | 2016-02-19T12:37:20.000Z | 2022-02-25T19:52:46.000Z | data/youtube/download.py | vyas97/iLID | 4d124b76fdbc37fbafd12e860281a4bc3ddf87d9 | [
"MIT"
] | 7 | 2017-03-24T04:12:09.000Z | 2020-06-16T11:27:54.000Z | data/youtube/download.py | vyas97/iLID | 4d124b76fdbc37fbafd12e860281a4bc3ddf87d9 | [
"MIT"
] | 31 | 2016-02-01T12:52:51.000Z | 2021-08-16T04:27:59.000Z | import yaml
import subprocess
import os
def read_yaml(file_name):
  """Parse the YAML file at *file_name* and return its contents.

  Bug fix: the original called ``yaml.load(f)`` without a Loader, which can
  execute arbitrary Python tags embedded in the file and is deprecated in
  PyYAML >= 5.1.  ``safe_load`` parses the same plain mappings/lists that
  sources.yml uses, without that risk.
  """
  with open(file_name, "r") as f:
    return yaml.safe_load(f)
def download(language, source, source_name, source_type):
  # Download every video from `source` (a youtube-dl URL/selector) as mp3
  # files under <language>/<source_name>/.  NOTE: this file uses Python 2
  # print statements, so it must run under a Python 2 interpreter.
  output_path = "{0}/{1}".format(language, source_name)

  if os.path.exists(output_path):
    # The target folder doubles as a "done" marker: never re-download.
    print "skipping {0} {1} because the target folder already exists".format(source_type, source_name)
  else:
    print "downloading {0} {1}".format(source_type, source_name)
    # HACK: the command line is built by string interpolation and executed
    # with shell=True; `source`/`language`/`source_name` must come from the
    # trusted sources.yml, otherwise this is a shell-injection risk.
    command = """youtube-dl -i --max-downloads 500 --extract-audio --audio-format mp3 {0} -o "{1}/{2}/%(title)s.%(ext)s" """.format(source,language,source_name)
    subprocess.call(command, shell=True)
def download_user(language, user):
  # Fetch a whole channel via youtube-dl's "ytuser:" selector.
  user_selector = "ytuser:%s" % user
  download(language, user_selector, user, "user")

def download_playlist(language, playlist_name, playlist_id):
  # A playlist id is a valid youtube-dl source as-is.
  download(language, playlist_id, playlist_name, "playlist")

if __name__ == '__main__':
  # sources.yml maps language -> {"users": [...], "playlists": {...}}
  # (see the loops below).  Playlist downloads are currently disabled.
  sources = read_yaml("sources.yml")

  for language, categories in sources.items():
    for user in categories["users"]:
      download_user(language, user)

    #for playlist_name, playlist_id in categories["playlists"].items():
    #  download_playlist(language, playlist_name, playlist_id)
5631814b4c9c0c56f06fb651305b7cf15ec952b4 | 2,906 | py | Python | tests/test_source_file/test_source_file.py | ethanio12345/OpenMC | 3b0c044974c59773dac4e3ce261a87e18fc53de5 | [
"MIT"
] | null | null | null | tests/test_source_file/test_source_file.py | ethanio12345/OpenMC | 3b0c044974c59773dac4e3ce261a87e18fc53de5 | [
"MIT"
] | null | null | null | tests/test_source_file/test_source_file.py | ethanio12345/OpenMC | 3b0c044974c59773dac4e3ce261a87e18fc53de5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import glob
import os
import sys
sys.path.insert(0, os.pardir)
from testing_harness import *
settings1="""<?xml version="1.0"?>
<settings>
<state_point batches="10" />
<source_point separate="true" />
<eigenvalue>
<batches>10</batches>
<inactive>5</inactive>
<particles>1000</particles>
</eigenvalue>
<source>
<space type="box">
<parameters>-4 -4 -4 4 4 4</parameters>
</space>
</source>
</settings>
"""
settings2 = """<?xml version="1.0"?>
<settings>
<eigenvalue>
<batches>10</batches>
<inactive>5</inactive>
<particles>1000</particles>
</eigenvalue>
<source>
<file> source.10.{0} </file>
</source>
</settings>
"""
class SourceFileTestHarness(TestHarness):
    """Harness that runs OpenMC twice: a normal run that emits a separate
    source file, then a restart run that reads that source file back in
    (settings2).  Cleanup always restores settings1."""

    def execute_test(self):
        """Run OpenMC with the appropriate arguments and check the outputs."""
        try:
            self._run_openmc()
            self._test_output_created()
            self._run_openmc_restart()
            results = self._get_results()
            self._write_results(results)
            self._compare_results()
        finally:
            # Always restore settings.xml and remove source files, even on
            # failure, so subsequent tests start from a clean state.
            self._cleanup()

    def update_results(self):
        """Update the results_true using the current version of OpenMC."""
        try:
            self._run_openmc()
            self._test_output_created()
            self._run_openmc_restart()
            results = self._get_results()
            self._write_results(results)
            self._overwrite_results()
        finally:
            self._cleanup()

    def _test_output_created(self):
        """Make sure statepoint and source files have been created."""
        statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))
        assert len(statepoint) == 1, 'Either multiple or no statepoint files ' \
            'exist.'
        assert statepoint[0].endswith('h5'), \
            'Statepoint file is not a HDF5 file.'
        # Batch 10 is the final batch configured in settings1.
        source = glob.glob(os.path.join(os.getcwd(), 'source.10.*'))
        assert len(source) == 1, 'Either multiple or no source files exist.'
        assert source[0].endswith('h5'), \
            'Source file is not a HDF5 file.'

    def _run_openmc_restart(self):
        # Get the name of the source file.
        source = glob.glob(os.path.join(os.getcwd(), 'source.10.*'))

        # Write the new settings.xml file, pointing the <source> element at
        # the file from the first run (only its extension varies).
        with open('settings.xml','w') as fh:
            fh.write(settings2.format(source[0].split('.')[-1]))

        # Run OpenMC.
        self._run_openmc()

    def _cleanup(self):
        # Base cleanup first, then remove every generated source file and
        # restore the original (non-restart) settings.xml.
        TestHarness._cleanup(self)
        output = glob.glob(os.path.join(os.getcwd(), 'source.*'))
        for f in output:
            if os.path.exists(f):
                os.remove(f)
        with open('settings.xml','w') as fh:
            fh.write(settings1)
if __name__ == '__main__':
    # statepoint.10.h5 matches the final batch configured in settings1.
    harness = SourceFileTestHarness('statepoint.10.h5')
    harness.main()
| 28.213592 | 80 | 0.593255 |
89749c3e40460646ee17cfadfc9ba17fc593a687 | 1,977 | py | Python | src/Graphing/image_to_plot.py | njanirudh/R-D-HBRS | 220777628406366179106069eb9ce211e50e7543 | [
"MIT"
] | 3 | 2019-08-27T22:00:56.000Z | 2020-04-15T06:31:39.000Z | src/Graphing/image_to_plot.py | njanirudh/R-D-HBRS | 220777628406366179106069eb9ce211e50e7543 | [
"MIT"
] | 1 | 2019-10-29T13:21:21.000Z | 2019-10-29T14:01:28.000Z | src/Graphing/image_to_plot.py | njanirudh/Research-Development-HBRS | 220777628406366179106069eb9ce211e50e7543 | [
"MIT"
] | null | null | null | from pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.cbook import get_sample_data
from matplotlib._png import read_png
import matplotlib.pyplot as plt
import numpy as np
import pandas
import pandas as pd
import sklearn.preprocessing as preproc
# Build the 3-D figure holding an image plane plus a grasp-result scatter.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')  # NOTE(review): rebound by gca() below

X_val = []
Y_val = []
Z_val = []

# Grasp-attempt log: one row per trial with the handle x/y/z and a Success
# flag.  Swap the commented line to plot the fridge dataset instead.
with open('/home/nj/HBRS/RnD/Research-Development-HBRS/reports/drawer_handle_grasp - drawer.csv') as csv_file:
# with open('/home/nj/HBRS/RnD/Research-Development-HBRS/reports/drawer_handle_grasp - fridge.csv') as csv_file:
    csv_reader = pd.read_csv(csv_file, delimiter=',')
    # print(csv_reader.head())
    # print(csv_reader['handle_x'].tolist())
    X_val = csv_reader['handle_x'].tolist()
    Y_val = csv_reader['handle_y'].tolist()
    Z_val = csv_reader['handle_z'].tolist()
    marker = np.array(csv_reader['Success'].tolist())
    # marker[np.where(marker == 0)] = 'red'
    # marker[np.where(marker == 1)] = 'green'
    # Red = failed grasp (Success == 0), green = successful grasp.
    color_list = ['r' if i == 0 else 'g' for i in marker]
    # print(color_list)

#------------------------------------------------------
# Texture a vertical plane at x = 0.80 with the detection image: the pixel
# grid is rescaled into the Y/Z data ranges so the photo lines up with the
# scattered points.
fn = get_sample_data("/home/nj/HBRS/RnD/Research-Development-HBRS/images/frcnn.png", asfileobj=False)
img = read_png(fn)
x_lim, y_lim = ogrid[0:img.shape[0], 0:img.shape[1]]
# lower_x, upper_x = min(X_val),max(X_val)
# x_norm = np.array([lower_x + (upper_x - lower_x) * x for x in x_lim])
y_norm = preproc.minmax_scale(x_lim,(min(Y_val),max(Y_val)))
z_norm = preproc.minmax_scale(np.array(y_lim).T,(min(Z_val),max(Z_val)))
# print(min(y_norm),max(y_norm))
# print(min(Z_val),max(Z_val))
# print(z_norm)
ax = gca(projection='3d')
ax.plot_surface(np.atleast_2d(0.80), y_norm, z_norm.T, rstride=10, cstride=10, facecolors=img)
#------------------------------------------------------

# Grasp positions coloured by outcome, then axis labels and display.
ax.scatter(X_val, Y_val, Z_val,color=color_list)
ax.set_xlabel('X Distance - meters')
ax.set_ylabel('Y Distance - meters')
ax.set_zlabel('Z Distance - meters')
plt.show()
| 33.508475 | 112 | 0.679312 |
3b534eaeb07df4e83f6454fec30f2f280d0634c5 | 1,779 | py | Python | modules/m_wolframalpha.py | LunaNyan/Luna_Libertin_Discord_Bot | db7def99602b65b682a128430118e3941aabcd6e | [
"MIT"
] | 8 | 2019-08-17T17:30:10.000Z | 2021-07-21T01:59:32.000Z | modules/m_wolframalpha.py | LunaNyan/Luna_Libertin_Discord_Bot | db7def99602b65b682a128430118e3941aabcd6e | [
"MIT"
] | 2 | 2019-07-16T10:01:39.000Z | 2019-12-30T11:30:29.000Z | modules/m_wolframalpha.py | LunaNyan/Luna_Libertin_Discord_Bot | db7def99602b65b682a128430118e3941aabcd6e | [
"MIT"
] | 3 | 2019-08-20T22:43:16.000Z | 2020-07-13T23:24:08.000Z | import sys, requests, wolframalpha, xmltodict
if __name__=="__main__":
    # This module is a bot plugin, not a script: refuse direct execution.
    print("FATAL : Run this bot from right way.")
    sys.exit(1)

# Module-level Wolfram|Alpha client; replaced with a real Client in load().
client = ""

def load(conf):
    """Initialise the shared Wolfram|Alpha client from the bot config.

    *conf* is a ConfigParser-like object holding the 'wolframalpha' appid.
    """
    global client
    client = wolframalpha.Client(conf.get("wolframalpha", "appid"))

def wa_calc(query):
    """Return the text of the first Wolfram|Alpha result for *query*.

    Raises StopIteration when the query produced no results (bare next()).
    """
    res = client.query(query)
    answer = next(res.results).text
    return answer

def wa_img(conf, query):
    """Download a result image for *query* to 'wa_temp_img.gif'.

    Queries the raw XML API directly and picks the first usable pod image;
    the magic pod indices below are fallbacks for queries whose primary pod
    carries no image.  Returns the temp filename.
    """
    url = 'http://api.wolframalpha.com/v1/query?input=%s&appid=%s' %(query, conf.get("wolframalpha", "appid"))
    res = requests.get(url)
    xml_content = res.content
    dict_content = xmltodict.parse(xml_content)
    results = []  # NOTE(review): never filled nor returned - dead variable
    pods = dict_content["queryresult"]["pod"]
    try:
        plot_url = pods["subpod"]["img"]["@src"]
    except:
        # Pod layout differs per query type; bare excepts also swallow any
        # KeyError/IndexError from unexpected API responses.
        try:
            plot_url = pods[3]["subpod"][0]["img"]["@src"]
        except:
            plot_url = pods[2]["subpod"]["img"]["@src"]
    img_data = requests.get(plot_url).content
    with open('wa_temp_img.gif', 'wb') as handler:
        handler.write(img_data)
    return 'wa_temp_img.gif'

def captcha(conf, query):
    """Collect every pod of *query*, then save one pod image to disk.

    All pods are gathered into ``results`` (caption/image/data per pod),
    but only the image of the last iterated pod is downloaded to
    'wa_temp_img.gif' and the collected list is discarded -
    NOTE(review): looks unintentional, confirm against callers.
    """
    url = 'http://api.wolframalpha.com/v1/query?input=%s&appid=%s' %(query, conf.get("wolframalpha", "appid"))
    res = requests.get(url)
    xml_content = res.content
    dict_content = xmltodict.parse(xml_content)
    results = []
    for pod in dict_content['queryresult']['pod']:
        result = {}
        title = pod['@title']
        link = pod['subpod']['img']['@src']
        data = pod['subpod']['img']['@alt']
        result['caption'] = title
        result['image'] = link
        result['data'] = data
        results.append(result)
    img_data = requests.get(result['image']).content
    with open('wa_temp_img.gif', 'wb') as handler:
        handler.write(img_data)
    return 'wa_temp_img.gif'
cb7d34d1a1eb767227b03d4c2d5f535f41f883fe | 4,143 | py | Python | google/bigtable/admin/v2/bigtable-admin-v2-py/google/cloud/bigtable_admin_v2/types/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/bigtable/admin/v2/bigtable-admin-v2-py/google/cloud/bigtable_admin_v2/types/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/bigtable/admin/v2/bigtable-admin-v2-py/google/cloud/bigtable_admin_v2/types/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .bigtable_instance_admin import (
CreateAppProfileRequest,
CreateClusterMetadata,
CreateClusterRequest,
CreateInstanceMetadata,
CreateInstanceRequest,
DeleteAppProfileRequest,
DeleteClusterRequest,
DeleteInstanceRequest,
GetAppProfileRequest,
GetClusterRequest,
GetInstanceRequest,
ListAppProfilesRequest,
ListAppProfilesResponse,
ListClustersRequest,
ListClustersResponse,
ListInstancesRequest,
ListInstancesResponse,
PartialUpdateInstanceRequest,
UpdateAppProfileMetadata,
UpdateAppProfileRequest,
UpdateClusterMetadata,
UpdateInstanceMetadata,
)
from .bigtable_table_admin import (
CheckConsistencyRequest,
CheckConsistencyResponse,
CreateBackupMetadata,
CreateBackupRequest,
CreateTableFromSnapshotMetadata,
CreateTableFromSnapshotRequest,
CreateTableRequest,
DeleteBackupRequest,
DeleteSnapshotRequest,
DeleteTableRequest,
DropRowRangeRequest,
GenerateConsistencyTokenRequest,
GenerateConsistencyTokenResponse,
GetBackupRequest,
GetSnapshotRequest,
GetTableRequest,
ListBackupsRequest,
ListBackupsResponse,
ListSnapshotsRequest,
ListSnapshotsResponse,
ListTablesRequest,
ListTablesResponse,
ModifyColumnFamiliesRequest,
OptimizeRestoredTableMetadata,
RestoreTableMetadata,
RestoreTableRequest,
SnapshotTableMetadata,
SnapshotTableRequest,
UpdateBackupRequest,
)
from .common import (
OperationProgress,
StorageType,
)
from .instance import (
AppProfile,
Cluster,
Instance,
)
from .table import (
Backup,
BackupInfo,
ColumnFamily,
EncryptionInfo,
GcRule,
RestoreInfo,
Snapshot,
Table,
RestoreSourceType,
)
# Public names re-exported by this types package; keep in sync with the
# imports above.
__all__ = (
    'CreateAppProfileRequest',
    'CreateClusterMetadata',
    'CreateClusterRequest',
    'CreateInstanceMetadata',
    'CreateInstanceRequest',
    'DeleteAppProfileRequest',
    'DeleteClusterRequest',
    'DeleteInstanceRequest',
    'GetAppProfileRequest',
    'GetClusterRequest',
    'GetInstanceRequest',
    'ListAppProfilesRequest',
    'ListAppProfilesResponse',
    'ListClustersRequest',
    'ListClustersResponse',
    'ListInstancesRequest',
    'ListInstancesResponse',
    'PartialUpdateInstanceRequest',
    'UpdateAppProfileMetadata',
    'UpdateAppProfileRequest',
    'UpdateClusterMetadata',
    'UpdateInstanceMetadata',
    'CheckConsistencyRequest',
    'CheckConsistencyResponse',
    'CreateBackupMetadata',
    'CreateBackupRequest',
    'CreateTableFromSnapshotMetadata',
    'CreateTableFromSnapshotRequest',
    'CreateTableRequest',
    'DeleteBackupRequest',
    'DeleteSnapshotRequest',
    'DeleteTableRequest',
    'DropRowRangeRequest',
    'GenerateConsistencyTokenRequest',
    'GenerateConsistencyTokenResponse',
    'GetBackupRequest',
    'GetSnapshotRequest',
    'GetTableRequest',
    'ListBackupsRequest',
    'ListBackupsResponse',
    'ListSnapshotsRequest',
    'ListSnapshotsResponse',
    'ListTablesRequest',
    'ListTablesResponse',
    'ModifyColumnFamiliesRequest',
    'OptimizeRestoredTableMetadata',
    'RestoreTableMetadata',
    'RestoreTableRequest',
    'SnapshotTableMetadata',
    'SnapshotTableRequest',
    'UpdateBackupRequest',
    'OperationProgress',
    'StorageType',
    'AppProfile',
    'Cluster',
    'Instance',
    'Backup',
    'BackupInfo',
    'ColumnFamily',
    'EncryptionInfo',
    'GcRule',
    'RestoreInfo',
    'Snapshot',
    'Table',
    'RestoreSourceType',
)
| 26.056604 | 74 | 0.732078 |
2b35e5686a7f62feaae4462a96f11f2cebcb570f | 323 | py | Python | tests/test_lyrics_prediction_file.py | castillogo/lyrics_project | 59855bfe91b5a41a68d52540a7ca6a8494a94ded | [
"MIT"
] | null | null | null | tests/test_lyrics_prediction_file.py | castillogo/lyrics_project | 59855bfe91b5a41a68d52540a7ca6a8494a94ded | [
"MIT"
] | 1 | 2021-11-15T17:50:49.000Z | 2021-11-15T17:50:49.000Z | tests/test_lyrics_prediction_file.py | castillogo/lyrics_project | 59855bfe91b5a41a68d52540a7ca6a8494a94ded | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 15:59:40 2020
@author: casti
"""
import pytest
from lyrics_prediction_file import print_evaluations
Y_TEST = [1, 2, 3, 4, 5, 6, 7, 8, 9]  # ground-truth fixture
Y_PRED = [1, 2, 3, 4, 5, 6, 7, 8, 9]  # identical to Y_TEST on purpose (perfect prediction)

def test_print_evaluations():
    # With predictions equal to the truth, the evaluation helper's return
    # value must be strictly positive.
    assert print_evaluations(Y_TEST, Y_PRED, 'Randomforest') > 0
| 20.1875 | 64 | 0.656347 |
3227f5ea7b7d29ea7e4a9ac76db7d798389a8570 | 4,052 | py | Python | render/render_depth.py | StefanieStoppel/pcn | 71b17791380798d349c18f96514099183d89836e | [
"MIT"
] | 9 | 2021-04-10T07:42:36.000Z | 2022-03-27T15:21:38.000Z | render/render_depth.py | StefanieStoppel/pcn | 71b17791380798d349c18f96514099183d89836e | [
"MIT"
] | null | null | null | render/render_depth.py | StefanieStoppel/pcn | 71b17791380798d349c18f96514099183d89836e | [
"MIT"
] | 2 | 2021-04-21T01:26:31.000Z | 2021-05-23T13:31:10.000Z | # Author: Wentao Yuan (wyuan1@cs.cmu.edu) 05/31/2018
import bpy
import mathutils
import numpy as np
import os
import sys
import time
# Usage: blender -b -P render_depth.py [ShapeNet directory] [model list] [output directory] [num scans per model]
def random_pose():
    """Sample a random camera pose as a 4x4 homogeneous matrix.

    The rotation composes independent uniform angles about the x, y and z
    axes; the translation is the rotated z axis, placing the camera one
    unit from the origin and pointing at it.
    """
    ax_, ay_, az_ = (np.random.uniform() * 2 * np.pi for _ in range(3))

    cx, sx = np.cos(ax_), np.sin(ax_)
    cy, sy = np.cos(ay_), np.sin(ay_)
    cz, sz = np.cos(az_), np.sin(az_)

    rot_x = np.array([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])

    rotation = np.dot(rot_z, np.dot(rot_y, rot_x))
    translation = rotation[:, 2:3]  # third column kept as a 3x1 block
    upper = np.concatenate([rotation, translation], 1)
    return np.concatenate([upper, [[0, 0, 0, 1]]], 0)
def setup_blender(width, height, focal_length):
    """Configure Blender for depth rendering.

    Returns (scene, camera, file-output node).  The output node writes the
    compositor Depth pass as 16-bit OpenEXR files.
    """
    # camera: horizontal FOV derived from the pinhole focal length in pixels
    camera = bpy.data.objects['Camera']
    camera.data.angle = np.arctan(width / 2 / focal_length) * 2

    # render layer
    scene = bpy.context.scene
    scene.render.filepath = 'buffer'
    scene.render.image_settings.color_depth = '16'
    scene.render.resolution_percentage = 100
    scene.render.resolution_x = width
    scene.render.resolution_y = height

    # compositor nodes: use_nodes is enabled before node_tree is accessed,
    # then the render layer's Depth output is wired to a file-output node.
    scene.use_nodes = True
    tree = scene.node_tree
    rl = tree.nodes.new('CompositorNodeRLayers')
    output = tree.nodes.new('CompositorNodeOutputFile')
    output.base_path = ''
    output.format.file_format = 'OPEN_EXR'
    tree.links.new(rl.outputs['Depth'], output.inputs[0])

    # remove default cube so only imported models appear in the renders
    bpy.data.objects['Cube'].select = True
    bpy.ops.object.delete()

    return scene, camera, output
if __name__ == '__main__':
    # Args come after Blender's own CLI options, hence the negative indices:
    # ... [ShapeNet dir] [model list] [output dir] [num scans per model]
    model_dir = sys.argv[-4]
    list_path = sys.argv[-3]
    output_dir = sys.argv[-2]
    num_scans = int(sys.argv[-1])

    width = 160
    height = 120
    focal = 100
    scene, camera, output = setup_blender(width, height, focal)
    # Pinhole intrinsics matrix matching the camera configured above.
    intrinsics = np.array([[focal, 0, width / 2], [0, focal, height / 2], [0, 0, 1]])

    with open(os.path.join(list_path)) as file:
        model_list = [line.strip() for line in file]
    open('blender.log', 'w+').close()
    # Start from a clean output tree every run.
    os.system('rm -rf %s' % output_dir)
    os.makedirs(output_dir)
    np.savetxt(os.path.join(output_dir, 'intrinsics.txt'), intrinsics, '%f')

    for model_id in model_list:
        start = time.time()
        exr_dir = os.path.join(output_dir, 'exr', model_id)
        pose_dir = os.path.join(output_dir, 'pose', model_id)
        os.makedirs(exr_dir)
        os.makedirs(pose_dir)

        # Redirect output to log file: fd 1 (stdout) is swapped for the log
        # so Blender's chatty importer/renderer does not flood the console.
        old_os_out = os.dup(1)
        os.close(1)
        os.open('blender.log', os.O_WRONLY)

        # Import mesh model
        model_path = os.path.join(model_dir, model_id, 'model.obj')
        bpy.ops.import_scene.obj(filepath=model_path)

        # Rotate model by 90 degrees around x-axis (z-up => y-up) to match ShapeNet's coordinates
        bpy.ops.transform.rotate(value=-np.pi / 2, axis=(1, 0, 0))

        # Render one randomly posed scan per frame; '#' in the path becomes
        # the frame number, and the matching pose matrix is saved alongside.
        for i in range(num_scans):
            scene.frame_set(i)
            pose = random_pose()
            camera.matrix_world = mathutils.Matrix(pose)
            output.file_slots[0].path = os.path.join(exr_dir, '#.exr')
            bpy.ops.render.render(write_still=True)
            np.savetxt(os.path.join(pose_dir, '%d.txt' % i), pose, '%f')

        # Clean up imported geometry so models do not accumulate in memory.
        bpy.ops.object.delete()
        for m in bpy.data.meshes:
            bpy.data.meshes.remove(m)
        for m in bpy.data.materials:
            m.user_clear()
            bpy.data.materials.remove(m)

        # Restore stdout and report timing for this model.
        os.close(1)
        os.dup(old_os_out)
        os.close(old_os_out)
        print('%s done, time=%.4f sec' % (model_id, time.time() - start))
2f2ae01c5dc0c8dc36854cb4c64f4b985bf765eb | 783 | pyde | Python | mode/examples/Topics/Simulate/MultipleParticleSystems/MultipleParticleSystems.pyde | katnino/processing.py | ff6450a6606908a014ce795340c06c8f6d90023d | [
"Apache-2.0"
] | 4 | 2016-08-09T14:14:36.000Z | 2021-12-10T07:51:35.000Z | mode/examples/Topics/Simulate/MultipleParticleSystems/MultipleParticleSystems.pyde | katnino/processing.py | ff6450a6606908a014ce795340c06c8f6d90023d | [
"Apache-2.0"
] | null | null | null | mode/examples/Topics/Simulate/MultipleParticleSystems/MultipleParticleSystems.pyde | katnino/processing.py | ff6450a6606908a014ce795340c06c8f6d90023d | [
"Apache-2.0"
] | null | null | null | """
Multiple Particle Systems
by Daniel Shiffman.
Click the mouse to generate a burst of particles
at mouse location.
Each burst is one instance of a particle system
with Particles and CrazyParticles (a subclass of Particle).
Note use of Inheritance and Polymorphism here.
"""
from crazy_particle import CrazyParticle
from particle import Particle
from particle_system import ParticleSystem
systems = None

def setup():
    """Processing setup hook: open the window and reset the system list."""
    # Bug fix: without this declaration the assignment below created a
    # *local* variable, the module-level `systems` stayed None, and both
    # draw() and mousePressed() crashed on their first use of it.
    global systems
    size(640, 360)
    systems = []
def draw():
    """Processing draw hook: advance every particle system each frame."""
    background(0)
    if systems:
        for system in systems:
            system.run()
            system.addParticle()
    else:
        # Nothing spawned yet: show the usage hint in the window centre.
        fill(255)
        textAlign(CENTER)
        text("click mouse to add particle systems", width / 2, height / 2)
def mousePressed():
    """Spawn a fresh one-particle system at the click position."""
    origin = PVector(mouseX, mouseY)
    systems.append(ParticleSystem(1, origin))
| 20.076923 | 74 | 0.704981 |
7642637d3c29176328f3ca35a05a393c8d4bf9f7 | 7,369 | py | Python | app.py | Rodrigoh2702/calendarizaciones | e1e9f1fac0a1a53da3a833dd3e94261a5c350989 | [
"MIT"
] | null | null | null | app.py | Rodrigoh2702/calendarizaciones | e1e9f1fac0a1a53da3a833dd3e94261a5c350989 | [
"MIT"
] | null | null | null | app.py | Rodrigoh2702/calendarizaciones | e1e9f1fac0a1a53da3a833dd3e94261a5c350989 | [
"MIT"
] | null | null | null | import tkinter as tk
from backend.proceso import Proceso
from backend.algo.srtf import SRTF
import copy
class App(tk.Tk):
    """Root window that stacks all pages in one cell and raises one at a time."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)

        # Single container; every page occupies the same grid cell, so
        # switching views is just a matter of raising the wanted frame.
        holder = tk.Frame(self)
        holder.pack(side="top", fill="both", expand=True)
        holder.grid_rowconfigure(0, weight=1)
        holder.grid_columnconfigure(0, weight=1)

        self.frames = {
            page: page(holder, self) for page in (MainWindow, SRTFWindow)
        }
        for page_frame in self.frames.values():
            page_frame.grid(row=0, column=0, sticky="nsew")

        self.show_frame(MainWindow)

    def show_frame(self, cont):
        """Bring the page registered under *cont* to the front."""
        self.frames[cont].tkraise()
class MainWindow(tk.Frame):
    """Landing page: lets the user pick a scheduling algorithm."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # self.parent.geometry("500x500")
        self.frame = tk.Frame(self)

        self.label = tk.Label(self.frame, text="Simulador de calendarización de procesos", justify='center')
        self.label.grid(row=0, column=0, sticky="we", pady=40, padx=20, columnspan=2)

        # SRTF switches to the in-place SRTF page via the controller.
        self.button1 = tk.Button(self.frame, text='SRTF', command=lambda: controller.show_frame(SRTFWindow))
        self.button1.grid(row=1, column=0, pady=(0, 40), sticky="we")

        # Round Robin has no command wired up yet.
        self.button2 = tk.Button(self.frame, text='Round Robin')
        self.button2.grid(row=1, column=1, pady=(0, 40), sticky="we")

        self.frame.pack()

    def srtf_window(self):
        """Open the SRTF page in a standalone top-level window.

        Bug fix: this helper referenced the never-assigned ``self.parent``
        and called SRTFWindow with a stale 3-argument signature, so it
        raised on every invocation.  SRTFWindow's controller argument is
        unused, so None is passed.
        """
        self.newWindow = tk.Toplevel(self)
        self.app = SRTFWindow(self.newWindow, None)
class SRTFWindow(tk.Frame):
    """SRTF page: collects processes, shows them next to their wait times,
    and re-runs the scheduler whenever the list changes."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # Bug fix: ``procesos_list`` and ``count_proceso`` used to be *class*
        # attributes, so every SRTFWindow instance shared (and mutated) the
        # same list.  They are per-instance state now; all external accesses
        # already go through the instance, so single-window behaviour is
        # unchanged.
        self.procesos_list = []
        self.count_proceso = 0
        self.frame = tk.Frame(self, background="black")
        self.create_table_task()
        self.create_wait_table()
        self.frame.pack()

    def add_process_window(self):
        """Open the dialog used to append a new process."""
        AddProcessWindowSRTF(tk.Toplevel(self), self.count_proceso)

    def create_table_task(self):
        """Build the left table (name / entry time / burst) with its header."""
        self.process_table = tk.Frame(self.frame)
        self.process_table.grid(row=0, column=0, sticky="nwe", padx=(15, 30), pady=(10, 40))

        self.name_process_head = tk.Label(self.process_table, text="Proceso", justify='center', background="white")
        self.name_process_head.grid(row=0, column=0, sticky="we")
        self.entry_process_head = tk.Label(self.process_table, text="Tiempo de entrada", justify='center', background="white")
        self.entry_process_head.grid(row=0, column=1, sticky="we")
        self.burst_process_head = tk.Label(self.process_table, text="Ráfaga", justify='center', background="white")
        self.burst_process_head.grid(row=0, column=2, sticky="we")
        self.add_process_button_head = tk.Button(self.process_table, text="+", command=self.add_process_window)
        self.add_process_button_head.grid(row=0, column=3, sticky="we")

        self.process_table_body = tk.Frame(self.process_table, background="red")
        self.process_table_body.grid(row=1, column=0, sticky="we", columnspan=4)

    def update_process_list_table(self):
        """(Re)render one row per process inside the left table body."""
        for i, p in enumerate(self.procesos_list):
            name_process = tk.Label(self.process_table_body, text=p.nombre, justify='center', name=f"np{i+1}")
            name_process.grid(row=i, column=0, sticky="we")
            entry_process = tk.Label(self.process_table_body, text=p.entrada, justify='center', name=f"ep{i+1}")
            entry_process.grid(row=i, column=1, sticky="we")
            burst_process = tk.Label(self.process_table_body, text=p.rafaga, justify='center', name=f"bp{i+1}")
            burst_process.grid(row=i, column=2, sticky="we")
            # Bug fix: bind the loop variable as a default argument.  The old
            # ``lambda: self.remove_process(p)`` captured ``p`` late, so every
            # "-" button removed the *last* process in the list.
            remove_process_button = tk.Button(self.process_table_body, text="-",
                                              command=lambda p=p: self.remove_process(p), name=f"rp{i+1}")
            remove_process_button.grid(row=i, column=3, sticky="we")

    def create_wait_table(self):
        """Build the right table (name / waiting time) with its header."""
        self.wait_table = tk.Frame(self.frame)
        self.wait_table.grid(row=0, column=1, sticky="nwe", padx=(30, 15), pady=(10, 40))

        self.wait_process_head = tk.Label(self.wait_table, text="Proceso", justify='center', background="white")
        self.wait_process_head.grid(row=0, column=0, sticky="we")
        self.wburst_process_head = tk.Label(self.wait_table, text="Tiempo de espera", justify='center', background="white")
        self.wburst_process_head.grid(row=0, column=1, sticky="we")

        self.wprocess_table_body = tk.Frame(self.wait_table, background="red")
        self.wprocess_table_body.grid(row=1, column=0, sticky="we", columnspan=2)

    def update_process_wait_table(self):
        """(Re)render one row per process inside the wait-time table body."""
        for i, p in enumerate(self.procesos_list):
            name_process = tk.Label(self.wprocess_table_body, text=p.nombre, justify='center', name=f"wnp{i+1}")
            name_process.grid(row=i, column=0, sticky="we")
            wait_process = tk.Label(self.wprocess_table_body, text=p.wait, justify='center', name=f"wwp{i+1}")
            wait_process.grid(row=i, column=1, sticky="we")

    def remove_process(self, process):
        """Drop *process* from the list and rebuild both table bodies."""
        for w in self.process_table_body.winfo_children():
            w.destroy()
        for w in self.wprocess_table_body.winfo_children():
            w.destroy()
        self.count_proceso -= 1
        self.procesos_list.remove(process)
        self.update_process_list_table()
        self.update_process_wait_table()

    def update_gantt(self):
        """Re-run the SRTF algorithm over the current process list."""
        aux = self.procesos_list
        for p in aux:
            print("ayuda dios ", p)  # leftover debug output
        algo = SRTF(*aux)
        result = algo.srtf()
        print(result)
class AddProcessWindowSRTF():
    """Dialog for entering one process (burst + arrival time).

    *parent* is a Toplevel created by SRTFWindow, so ``self.parent.master``
    is the SRTFWindow instance whose state this dialog mutates.
    """

    def __init__(self, parent, count_p):
        self.parent = parent
        self.count_p = count_p
        self.frame = tk.Frame(parent)
        self.frame.grid(row=0, column=0)

        self.label1 = tk.Label(self.frame, text="Ráfaga", justify='right')
        self.label1.grid(row=0, column=0, sticky="we")
        self.input1 = tk.Entry(self.frame)
        self.input1.grid(row=0, column=1, sticky="we")

        self.label2 = tk.Label(self.frame, text="Tiempo de llegada", justify='right')
        self.label2.grid(row=1, column=0, sticky="we")
        self.input2 = tk.Entry(self.frame)
        self.input2.grid(row=1, column=1, sticky="we")

        self.add_process_button = tk.Button(self.frame, text="Añadir proceso", command=self.add_process_)
        self.add_process_button.grid(row=2, column=0, columnspan=2)

        self.count = tk.Label(self.frame, text=f"{self.parent.master.count_proceso}", justify='right')
        self.count.grid(row=3, column=0, columnspan=2)

    def add_process_(self):
        # Read both entries, register the process on the SRTF page and close.
        # NOTE(review): int() raises ValueError on non-numeric input - there
        # is no validation or error feedback here.
        burst = self.input1.get()
        entry = self.input2.get()
        self.parent.master.count_proceso += 1
        name = f"p{self.parent.master.count_proceso}"  # auto-name: p1, p2, ...
        process = Proceso(int(entry), int(burst), name)
        self.parent.master.procesos_list.append(process)
        self.close_windows()

    def close_windows(self):
        # Refresh the SRTF page (gantt + both tables) before destroying the
        # dialog's Toplevel.
        self.parent.master.update_gantt()
        self.parent.master.update_process_list_table()
        self.parent.master.update_process_wait_table()
        self.parent.destroy()
def main():
    # Build the Tk application and enter its event loop (blocks until the
    # window is closed).
    app = App()
    app.mainloop()

if __name__ == '__main__':
    main()
| 36.300493 | 137 | 0.645135 |
a18ffc222cc5338097804fe2d91f57fd741d28e0 | 2,391 | py | Python | tests/contrib/django/test_connection.py | vijayperiasamy-eb/dd-trace-py | 2b0d396fc7f76582e8ffedff48933245a77ebaf2 | [
"BSD-3-Clause"
] | null | null | null | tests/contrib/django/test_connection.py | vijayperiasamy-eb/dd-trace-py | 2b0d396fc7f76582e8ffedff48933245a77ebaf2 | [
"BSD-3-Clause"
] | null | null | null | tests/contrib/django/test_connection.py | vijayperiasamy-eb/dd-trace-py | 2b0d396fc7f76582e8ffedff48933245a77ebaf2 | [
"BSD-3-Clause"
] | 1 | 2021-02-11T10:20:14.000Z | 2021-02-11T10:20:14.000Z | import mock
import time
# 3rd party
from django.contrib.auth.models import User
from ddtrace.contrib.django.conf import settings
from ddtrace.contrib.django.patch import apply_django_patches, connections
# testing
from .utils import DjangoTraceTestCase, override_ddtrace_settings
class DjangoConnectionTest(DjangoTraceTestCase):
    """
    Ensures that database connections are properly traced
    """
    def test_connection(self):
        """A plain ORM query yields exactly one sqlite span with DB tags."""
        # trace a simple query
        start = time.time()
        users = User.objects.count()
        assert users == 0
        end = time.time()

        # tests
        spans = self.tracer.writer.pop()
        assert spans, spans
        assert len(spans) == 1

        span = spans[0]
        assert span.name == 'sqlite.query'
        assert span.service == 'defaultdb'
        assert span.span_type == 'sql'
        assert span.get_tag('django.db.vendor') == 'sqlite'
        assert span.get_tag('django.db.alias') == 'default'
        # The span must fall entirely within the wall-clock window of the query.
        assert start < span.start < span.start + span.duration < end

    def test_django_db_query_in_resource_not_in_tags(self):
        # The SQL text is exposed as the span resource only, never
        # duplicated in the 'sql.query' tag.
        User.objects.count()
        spans = self.tracer.writer.pop()
        assert spans[0].name == 'sqlite.query'
        assert spans[0].resource == 'SELECT COUNT(*) AS "__count" FROM "auth_user"'
        assert spans[0].get_tag('sql.query') is None

    @override_ddtrace_settings(INSTRUMENT_DATABASE=False)
    def test_connection_disabled(self):
        # With database instrumentation switched off, no spans are emitted.
        # trace a simple query
        users = User.objects.count()
        assert users == 0

        # tests
        spans = self.tracer.writer.pop()
        assert len(spans) == 0

    def test_should_append_database_prefix(self):
        # trace a simple query and check if the prefix is correctly
        # loaded from Django settings
        settings.DEFAULT_DATABASE_PREFIX = 'my_prefix_db'
        User.objects.count()

        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert span.service == 'my_prefix_db-defaultdb'

    def test_apply_django_patches_calls_connections_all(self):
        # Patching must enumerate the configured DB connections exactly once.
        with mock.patch.object(connections, 'all') as mock_connections:
            apply_django_patches(patch_rest_framework=False)

        assert mock_connections.call_count == 1
        assert mock_connections.mock_calls == [mock.call()]
| 32.753425 | 83 | 0.658302 |
c1004b9d5a10d906d787d22a81489d2591cc6768 | 18,652 | py | Python | src/transformers/convert_graph_to_onnx.py | kct22aws/transformers | 04cddaf402591e9f5bdb5f116a111d829a0ce4f4 | [
"Apache-2.0"
] | 5 | 2020-10-30T13:07:02.000Z | 2021-03-17T12:18:30.000Z | src/transformers/convert_graph_to_onnx.py | kct22aws/transformers | 04cddaf402591e9f5bdb5f116a111d829a0ce4f4 | [
"Apache-2.0"
] | null | null | null | src/transformers/convert_graph_to_onnx.py | kct22aws/transformers | 04cddaf402591e9f5bdb5f116a111d829a0ce4f4 | [
"Apache-2.0"
] | 1 | 2020-08-06T05:23:13.000Z | 2020-08-06T05:23:13.000Z | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from os import listdir, makedirs
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from packaging.version import Version, parse
from transformers.file_utils import ModelOutput, is_tf_available, is_torch_available
from transformers.pipelines import Pipeline, pipeline
from transformers.tokenization_utils import BatchEncoding
# This is the minimal required version to
# support some ONNX Runtime features
ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0")

# Pipeline tasks whose models this converter knows how to instantiate and
# export to ONNX.
SUPPORTED_PIPELINES = [
    "feature-extraction",
    "ner",
    "sentiment-analysis",
    "fill-mask",
    "question-answering",
    "text-generation",
    "translation_en_to_fr",
    "translation_en_to_de",
    "translation_en_to_ro",
]
class OnnxConverterArgumentParser(ArgumentParser):
    """
    Wraps all the script arguments supported to export transformers models to ONNX IR
    """

    def __init__(self):
        super().__init__("ONNX Converter")

        # (flags, options) pairs, registered in the order they should appear
        # in the generated --help output.
        argument_table = [
            (("--pipeline",),
             dict(type=str, choices=SUPPORTED_PIPELINES, default="feature-extraction")),
            (("--model",),
             dict(type=str, required=True, help="Model's id or path (ex: bert-base-cased)")),
            (("--tokenizer",),
             dict(type=str, help="Tokenizer's id or path (ex: bert-base-cased)")),
            (("--framework",),
             dict(type=str, choices=["pt", "tf"], help="Framework for loading the model")),
            (("--opset",),
             dict(type=int, default=11, help="ONNX opset to use")),
            (("--check-loading",),
             dict(action="store_true", help="Check ONNX is able to load the model")),
            (("--use-external-format",),
             dict(action="store_true", help="Allow exporting model >= than 2Gb")),
            (("--quantize",),
             dict(action="store_true", help="Quantize the neural network to be run with int8")),
            (("output",), dict()),
        ]
        for flags, options in argument_table:
            self.add_argument(*flags, **options)
def generate_identified_filename(filename: Path, identifier: str) -> Path:
    """
    Insert *identifier* between the stem and the extension of *filename*.

    Args:
        filename: pathlib.Path to derive the new file name from.
        identifier: Suffix inserted just before the extension (e.g. "-optimized").

    Returns: A new Path with the identifier appended to the stem.
    """
    stem_with_identifier = filename.stem + identifier
    return (filename.parent / stem_with_identifier).with_suffix(filename.suffix)
def check_onnxruntime_requirements(minimum_version: Version):
    """
    Check onnxruntime is installed and that the installed version is recent enough.

    Args:
        minimum_version: Oldest acceptable onnxruntime version.

    Raises:
        ImportError: If onnxruntime is not installed, or is older than *minimum_version*.
    """
    try:
        import onnxruntime
    except ImportError:
        raise ImportError(
            "onnxruntime doesn't seem to be currently installed. "
            "Please install the onnxruntime by running `pip install onnxruntime`"
            " and relaunch the conversion."
        )

    # Parse the version of the installed onnxruntime
    ort_version = parse(onnxruntime.__version__)

    # NOTE: this check is intentionally OUTSIDE the try block above. Previously it
    # lived inside it, so the "too old" ImportError raised here was swallowed by
    # `except ImportError` and replaced with a misleading "not installed" message.
    # The comparison also now uses the `minimum_version` argument (instead of the
    # module-level constant) so the threshold checked matches the one reported.
    if ort_version < minimum_version:
        raise ImportError(
            f"We found an older version of onnxruntime ({onnxruntime.__version__}) "
            f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n"
            f"Please update onnxruntime by running `pip install --upgrade onnxruntime`"
        )
def ensure_valid_input(model, tokens, input_names):
    """
    Reorder the tokenized inputs to match the model's forward() signature.

    Args:
        model: The model used to forward the input data
        tokens: BatchEncoding holding the input data
        input_names: The name of the inputs

    Returns: Tuple of (ordered input names, tuple of matching input values)
    """
    print("Ensuring inputs are in correct order")

    # forward()'s positional parameter names come first in co_varnames;
    # index 0 is "self" and is skipped.
    forward_parameters = model.forward.__code__.co_varnames
    ordered_names = []
    ordered_values = []
    for parameter in forward_parameters[1:]:
        if parameter not in input_names:
            # Stop at the first forward() parameter we cannot supply.
            print(f"{parameter} is not present in the generated input list.")
            break
        ordered_names.append(parameter)
        ordered_values.append(tokens[parameter])

    print(f"Generated inputs order: {ordered_names}")
    return ordered_names, tuple(ordered_values)
def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:
    """
    Attempt to infer the static vs dynamic axes for each input and output tensors for a specific model
    Args:
        nlp: The pipeline object holding the model to be exported
        framework: The framework identifier to dispatch to the correct inference scheme (pt/tf)
    Returns:
        - List of the inferred input variable names
        - List of the inferred output variable names
        - Dictionary with input/output variables names as key and shape tensor as value
        - a BatchEncoding reference which was used to infer all the above information
    """
    # Maps axis index -> symbolic name ("batch" / "sequence") for one tensor;
    # lists/tuples of tensors are handled element-wise (recursively).
    def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):
        if isinstance(tensor, (tuple, list)):
            return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]
        else:
            # Let's assume batch is the first axis with only 1 element (~~ might not be always true ...)
            axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: "batch"}
            if is_input:
                # Inputs are expected to be (batch, sequence); anything else is unsupported.
                if len(tensor.shape) == 2:
                    axes[1] = "sequence"
                else:
                    raise ValueError(f"Unable to infer tensor axes ({len(tensor.shape)})")
            else:
                # Any output axis whose size equals the sample sequence length is
                # assumed to be the dynamic "sequence" axis (heuristic).
                seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]
                axes.update({dim: "sequence" for dim in seq_axes})
        print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
        return axes
    # Run one dummy sentence through the pipeline to observe concrete shapes.
    tokens = nlp.tokenizer("This is a sample output", return_tensors=framework)
    seq_len = tokens.input_ids.shape[-1]
    outputs = nlp.model(**tokens) if framework == "pt" else nlp.model(tokens)
    if isinstance(outputs, ModelOutput):
        outputs = outputs.to_tuple()
    if not isinstance(outputs, (list, tuple)):
        outputs = (outputs,)
    # Generate input names & axes
    input_vars = list(tokens.keys())
    input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}
    # flatten potentially grouped outputs (past for gpt2, attentions)
    outputs_flat = []
    for output in outputs:
        if isinstance(output, (tuple, list)):
            outputs_flat.extend(output)
        else:
            outputs_flat.append(output)
    # Generate output names & axes
    output_names = [f"output_{i}" for i in range(len(outputs_flat))]
    output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}
    # Create the aggregated axes representation
    dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)
    return input_vars, output_names, dynamic_axes, tokens
def load_graph_from_args(
    pipeline_name: str, framework: str, model: str, tokenizer: Optional[str] = None, **models_kwargs
) -> Pipeline:
    """
    Build the pipeline (tokenizer + model) described by the CLI arguments.

    Args:
        pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)
        framework: The actual model to convert the pipeline from ("pt" or "tf")
        model: The model name which will be loaded by the pipeline
        tokenizer: The tokenizer name which will be loaded by the pipeline, default to the model's value

    Returns: Pipeline object
    """
    # Default the tokenizer to the model identifier when none was supplied.
    if tokenizer is None:
        tokenizer = model

    # Fail early if the requested framework is not importable.
    if framework == "pt" and not is_torch_available():
        raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
    if framework == "tf" and not is_tf_available():
        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")

    print(f"Loading pipeline (model: {model}, tokenizer: {tokenizer})")

    # Allocate tokenizer and model through the transformers pipeline factory.
    return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)
def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
    """
    Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR).
    Args:
        nlp: The pipeline to be exported
        opset: The actual version of the ONNX operator set to use
        output: Path where will be stored the generated ONNX model
        use_external_format: Split the model definition from its parameters to allow model bigger than 2GB
    Returns: None (the ONNX graph is written to `output`)
    """
    if not is_torch_available():
        raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
    import torch
    from torch.onnx import export
    print(f"Using framework PyTorch: {torch.__version__}")
    # Tracing must run without gradients: we only need a forward pass to record the graph.
    with torch.no_grad():
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt")
        # Reorder/trim the tokenizer outputs to match forward()'s positional signature.
        ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names)
        # NOTE(review): `use_external_data_format` and `enable_onnx_checker` were
        # removed from torch.onnx.export in recent PyTorch releases -- confirm the
        # torch version this repo pins still accepts them.
        export(
            nlp.model,
            model_args,
            f=output.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
    """
    Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR).
    Args:
        nlp: The pipeline to be exported
        opset: The actual version of the ONNX operator set to use
        output: Path where will be stored the generated ONNX model
    Notes: TensorFlow cannot export model bigger than 2GB due to internal constraint from TensorFlow
    """
    if not is_tf_available():
        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")
    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")
    # Imports are kept inside the try so a missing optional dependency surfaces
    # as the helpful "please install ..." message below instead of a bare ImportError.
    try:
        import tensorflow as tf
        from keras2onnx import __version__ as k2ov
        from keras2onnx import convert_keras, save_model
        print(f"Using framework TensorFlow: {tf.version.VERSION}, keras2onnx: {k2ov}")
        # Build
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")
        # Forward pass so Keras builds/traces the model before conversion.
        nlp.model.predict(tokens.data)
        onnx_model = convert_keras(nlp.model, nlp.model.name, target_opset=opset)
        save_model(onnx_model, output.as_posix())
    except ImportError as e:
        raise Exception(f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first.")
def convert(
    framework: str,
    model: str,
    output: Path,
    opset: int,
    tokenizer: Optional[str] = None,
    use_external_format: bool = False,
    pipeline_name: str = "feature-extraction",
    **model_kwargs
):
    """
    Convert the pipeline object to the ONNX Intermediate Representation (IR) format.

    Args:
        framework: The framework the pipeline is backed by ("pt" or "tf")
        model: The name of the model to load for the pipeline
        output: The path where the ONNX graph will be stored
        opset: The actual version of the ONNX operator set to use
        tokenizer: The name of the model to load for the pipeline, default to the model's name if not provided
        use_external_format:
            Split the model definition from its parameters to allow model bigger than 2GB (PyTorch only)
        pipeline_name: The kind of pipeline to instantiate (ner, question-answering, etc.)
        model_kwargs: Keyword arguments to be forwarded to the model constructor
    """
    print(f"ONNX opset version set to: {opset}")

    # Build the pipeline that will be exported.
    nlp = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs)

    # Refuse to overwrite anything: the destination folder must be fresh or empty.
    parent_dir = output.parent
    if not parent_dir.exists():
        print(f"Creating folder {parent_dir}")
        makedirs(parent_dir.as_posix())
    elif len(listdir(parent_dir.as_posix())) > 0:
        raise Exception(f"Folder {parent_dir.as_posix()} is not empty, aborting conversion")

    # Dispatch to the framework-specific exporter.
    if framework == "pt":
        convert_pytorch(nlp, opset, output, use_external_format)
    else:
        convert_tensorflow(nlp, opset, output)
def optimize(onnx_model_path: Path) -> Path:
    """
    Ask onnxruntime to apply all possible graph-level optimizations to the model.

    Args:
        onnx_model_path: filepath where the model binary description is stored

    Returns: Path where the optimized model binary description has been saved
    """
    from onnxruntime import InferenceSession, SessionOptions

    # Generate model name with suffix "optimized"
    opt_model_path = generate_identified_filename(onnx_model_path, "-optimized")

    # Creating an InferenceSession with optimized_model_filepath set makes
    # onnxruntime write the optimized graph to that location as a side effect.
    session_options = SessionOptions()
    session_options.optimized_model_filepath = opt_model_path.as_posix()
    InferenceSession(onnx_model_path.as_posix(), session_options)

    print(f"Optimized model has been written at {opt_model_path}: \N{heavy check mark}")
    print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\")
    return opt_model_path
def quantize(onnx_model_path: Path) -> Path:
    """
    Quantize the weights of the model from float32 to int8 to allow very efficient inference on modern CPU
    Args:
        onnx_model_path: Path to location the exported ONNX model is stored
    Returns: The Path generated for the quantized model
    """
    import onnx
    # NOTE: this local import deliberately shadows this function's own name
    # inside the body; every `quantize(...)` call below is onnxruntime's.
    from onnxruntime.quantization import QuantizationMode, quantize
    onnx_model = onnx.load(onnx_model_path.as_posix())
    # Discussed with @yufenglee from ONNX runtime, this will be address in the next release of onnxruntime
    print(
        "As of onnxruntime 1.4.0, models larger than 2GB will fail to quantize due to protobuf constraint.\n"
        "This limitation will be removed in the next release of onnxruntime."
    )
    quantized_model = quantize(
        model=onnx_model,
        quantization_mode=QuantizationMode.IntegerOps,
        force_fusions=True,
        symmetric_weight=True,
    )
    # Append "-quantized" at the end of the model's name
    quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized")
    # Save model
    # NOTE(review): the success message is printed BEFORE save_model runs; if
    # saving fails the log will be misleading -- consider reordering.
    print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}")
    onnx.save_model(quantized_model, quantized_model_path.as_posix())
    return quantized_model_path
def verify(path: Path):
    """Try to load the exported ONNX model through onnxruntime and report the outcome."""
    from onnxruntime import InferenceSession, SessionOptions
    from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException

    print(f"Checking ONNX model loading from: {path} ...")
    try:
        session_options = SessionOptions()
        InferenceSession(path.as_posix(), session_options, providers=["CPUExecutionProvider"])
    except RuntimeException as error:
        print(f"Error while loading the model {error}: \N{heavy ballot x}")
    else:
        print(f"Model {path} correctly loaded: \N{heavy check mark}")
if __name__ == "__main__":
    # CLI entry point: convert -> (optionally) optimize + quantize -> (optionally) verify.
    parser = OnnxConverterArgumentParser()
    args = parser.parse_args()
    # Make sure output is absolute path
    args.output = Path(args.output).absolute()
    try:
        print("\n====== Converting model to ONNX ======")
        # Convert
        convert(
            args.framework,
            args.model,
            args.output,
            args.opset,
            args.tokenizer,
            args.use_external_format,
            args.pipeline,
        )
        if args.quantize:
            # Ensure requirements for quantization on onnxruntime is met
            check_onnxruntime_requirements(ORT_QUANTIZE_MINIMUM_VERSION)
            # onnxruntime optimizations doesn't provide the same level of performances on TensorFlow than PyTorch
            if args.framework == "tf":
                print(
                    "\t Using TensorFlow might not provide the same optimization level compared to PyTorch.\n"
                    "\t For TensorFlow users you can try optimizing the model directly through onnxruntime_tools.\n"
                    "\t For more information, please refer to the onnxruntime documentation:\n"
                    "\t\thttps://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers\n"
                )
            print("\n====== Optimizing ONNX model ======")
            # Quantization works best when using the optimized version of the model
            args.optimized_output = optimize(args.output)
            # Do the quantization on the right graph
            args.quantized_output = quantize(args.optimized_output)
        # And verify each graph that was actually produced on this run.
        if args.check_loading:
            print("\n====== Check exported ONNX model(s) ======")
            verify(args.output)
            if hasattr(args, "optimized_output"):
                verify(args.optimized_output)
            if hasattr(args, "quantized_output"):
                verify(args.quantized_output)
    except Exception as e:
        # Any failure aborts the whole run with a non-zero exit code.
        print(f"Error while converting the model: {e}")
        exit(1)
| 36.788955 | 119 | 0.66958 |
08dafe6011e7ec9c20106396fe2461e7f4b7f55b | 5,061 | py | Python | disentanglement_lib/evaluation/metrics/local_modularity_test.py | travers-rhodes/disentanglement_lib | 73d4b995e88efdd5ffbe98a72e48a620c58f4dc7 | [
"Apache-2.0"
] | null | null | null | disentanglement_lib/evaluation/metrics/local_modularity_test.py | travers-rhodes/disentanglement_lib | 73d4b995e88efdd5ffbe98a72e48a620c58f4dc7 | [
"Apache-2.0"
] | null | null | null | disentanglement_lib/evaluation/metrics/local_modularity_test.py | travers-rhodes/disentanglement_lib | 73d4b995e88efdd5ffbe98a72e48a620c58f4dc7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
# Copyright 2021 Travers Rhodes. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was modified by Travers Rhodes
"""Tests for local_modularity.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from disentanglement_lib.data.ground_truth import dummy_data
from disentanglement_lib.evaluation.metrics import utils, local_modularity
import numpy as np
from six.moves import range
import gin.tf
def _identity_discretizer(target, num_bins):
del num_bins
return target
def initialize_gin():
  # Bind the gin-configurable parameters shared by every test in this module.
  # Individual tests override some of these (e.g. locality_proportion).
  gin.bind_parameter("discretizer.discretizer_fn", _identity_discretizer)
  #gin.bind_parameter("discretizer.discretizer_fn", utils._histogram_discretize)
  gin.bind_parameter("discretizer.num_bins", 10)
  gin.bind_parameter("local_sample_factors.denylist_factors", [])
  # locality_proportion 1.0 = sample over the full factor space (global sampling).
  gin.bind_parameter("local_sample_factors.locality_proportion", 1.0)
  gin.bind_parameter("local_sample_factors.continuity_cutoff", 0)
  gin.bind_parameter("local_modularity.num_train", 20000)
  gin.bind_parameter("local_modularity.num_local_clusters", 3)
class ModularityExplicitnessTest(absltest.TestCase):
  # A perfectly disentangled representation (identity) should score near 1.
  def test_metric(self):
    initialize_gin()
    ground_truth_data = dummy_data.IdentityObservationsData()
    representation_function = lambda x: np.array(x, dtype=np.float64)
    random_state = np.random.RandomState(0)
    scores = local_modularity.compute_local_modularity(
        ground_truth_data, representation_function, random_state)
    self.assertBetween(scores["modularity_score"], 0.9, 1.0)
  def test_bad_metric(self):
    initialize_gin()
    ground_truth_data = dummy_data.IdentityObservationsData()
    random_state_rep = np.random.RandomState(0)
    # The representation which randomly permutes the factors, should have equal
    # non-zero MI which should give a low modularity score.
    def representation_function(x):
      code = np.array(x, dtype=np.float64)
      for i in range(code.shape[0]):
        code[i, :] = random_state_rep.permutation(code[i, :])
      return code
    random_state = np.random.RandomState(0)
    scores = local_modularity.compute_local_modularity(
        ground_truth_data, representation_function, random_state)
    self.assertBetween(scores["modularity_score"], 0.0, 0.2)
  # Duplicating each latent should not hurt modularity: each copy still
  # depends on a single factor.
  def test_duplicated_latent_space(self):
    initialize_gin()
    ground_truth_data = dummy_data.IdentityObservationsData()
    def representation_function(x):
      x = np.array(x, dtype=np.float64)
      return np.hstack([x, x])
    random_state = np.random.RandomState(0)
    scores = local_modularity.compute_local_modularity(
        ground_truth_data, representation_function, random_state)
    self.assertBetween(scores["modularity_score"], 0.9, 1.0)
  # the representation here folds the dataset in on itself.
  # it looks like MIG counts this as a relatively good discretization which
  # makes sense because mutual information is not correlation
  def test_locally_good_metric(self):
    initialize_gin()
    gin.bind_parameter("discretizer.discretizer_fn", utils._histogram_discretize)
    num_factors = 3
    num_possible_values = 40
    ground_truth_data = dummy_data.IdentityObservationsCustomSize(
        [num_possible_values] * num_factors)
    random_state = np.random.RandomState(30)
    # modularity is so weird. We want a factor that globally
    # depends on multiple variables but locally depends on only
    # one variable. Easiest is something like Floor[(A+B) / 5] + C / 40
    # which for small changes of A,B,C only depends on changes of C
    # and globally depends jointly on A,B
    def representation_function(x):
      representation = np.floor((x[:,0:1] + x[:,1:2])/12) + x[:,2:3] / 40
      return representation
    # if you sample globally (lp = 1) then you get a low MIG score
    # if you sample locally (lp=0.1) then you get a high MIG score
    for lp, exp_min_score, exp_max_score in [(1.0, 0.0, 0.3), (0.1, 0.8, 1.0)]:
      gin.bind_parameter("local_sample_factors.locality_proportion" ,lp)
      gin.bind_parameter("local_modularity.num_local_clusters",3)
      scores = local_modularity.compute_local_modularity(
          ground_truth_data, representation_function, random_state,
          batch_size=30)
      self.assertBetween(scores["modularity_score"], exp_min_score, exp_max_score)
if __name__ == "__main__":
  # BUGFIX: the previous `logging.set_verbosity(logging.DEBUG)` call referenced
  # a `logging` name that is never imported in this module, so running the file
  # directly raised NameError before any test executed. absltest.main() is
  # sufficient on its own (verbosity can be controlled via absl flags).
  absltest.main()
| 43.25641 | 84 | 0.753013 |
ccf2161242a1d75bd20b4a0b7769d6579c86d46e | 774 | py | Python | trigger/pulseRatioWOnoise.py | greatofdream/pmtTest | d8f1fc8e94c3999516b6eef01e228311e6135053 | [
"MIT"
] | null | null | null | trigger/pulseRatioWOnoise.py | greatofdream/pmtTest | d8f1fc8e94c3999516b6eef01e228311e6135053 | [
"MIT"
] | null | null | null | trigger/pulseRatioWOnoise.py | greatofdream/pmtTest | d8f1fc8e94c3999516b6eef01e228311e6135053 | [
"MIT"
] | null | null | null | import numpy as np, h5py
import argparse
if __name__ == "__main__":
    # Subtract the expected noise contribution (per counting window) from the
    # measured pulse counts, then write the corrected table back out.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', dest='ipt', help='input text file')
    parser.add_argument('-o', dest='opt', help='output text file')
    parser.add_argument('--ref', help='the reference of noise')
    args = parser.parse_args()

    noise_table = np.genfromtxt(args.ref, dtype=[('thre', '<i2'), ('noi', '<f4')])
    pulse_table = np.genfromtxt(
        args.ipt,
        dtype=[('prompt', '<f4'), ('delay1', '<f4'), ('delay10', '<f4'), ('triggerNum', '<i4')],
    )

    # Window lengths for the prompt / delay1 / delay10 counts; combined with the
    # 1e-6 factor these presumably are microseconds against a rate in Hz -- TODO confirm.
    # NOTE(review): the reshape((2, 1)) assumes the noise file holds exactly two
    # threshold rows matching the two rows of the pulse file.
    window_lengths = np.array([200, 700, 9000])
    expected_noise = noise_table['noi'].reshape((2, 1)) * window_lengths * 1e-6

    for column, window_index in (('prompt', 0), ('delay1', 1), ('delay10', 2)):
        pulse_table[column] -= expected_noise[:, window_index]

    np.savetxt(args.opt, pulse_table)
7121561ed9035de74e1c98ca82d378979367c3eb | 1,062 | py | Python | kubernetes_asyncio/test/test_v1beta1_network_policy_spec.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | 1 | 2021-01-13T09:28:57.000Z | 2021-01-13T09:28:57.000Z | kubernetes_asyncio/test/test_v1beta1_network_policy_spec.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_v1beta1_network_policy_spec.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1beta1_network_policy_spec import V1beta1NetworkPolicySpec # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1beta1NetworkPolicySpec(unittest.TestCase):
    """Generated unit-test stub for the V1beta1NetworkPolicySpec model."""

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1beta1NetworkPolicySpec(self):
        """Test V1beta1NetworkPolicySpec"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes_asyncio.client.models.v1beta1_network_policy_spec.V1beta1NetworkPolicySpec() # noqa: E501
        pass
unittest.main()
| 25.902439 | 119 | 0.741055 |
aabc6334a63c35c9447a1544824aaf83e3382dcb | 14,202 | py | Python | requests_oauthlib/oauth2_session.py | Huddle/requests-oauthlib | 503146ae39c1c050abf39d2fd5a43595e621cdf7 | [
"0BSD"
] | null | null | null | requests_oauthlib/oauth2_session.py | Huddle/requests-oauthlib | 503146ae39c1c050abf39d2fd5a43595e621cdf7 | [
"0BSD"
] | null | null | null | requests_oauthlib/oauth2_session.py | Huddle/requests-oauthlib | 503146ae39c1c050abf39d2fd5a43595e621cdf7 | [
"0BSD"
] | null | null | null | from __future__ import unicode_literals
import logging
from oauthlib.common import generate_token, urldecode
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
import requests
log = logging.getLogger(__name__)
class TokenUpdated(Warning):
    """Warning carrying a freshly refreshed OAuth 2 token.

    Raised by :class:`OAuth2Session` when a token was auto-refreshed but no
    ``token_updater`` callback is configured to persist it; the new token is
    available on the ``token`` attribute.
    """

    def __init__(self, token):
        self.token = token
        super(TokenUpdated, self).__init__()
class OAuth2Session(requests.Session):
"""Versatile OAuth 2 extension to :class:`requests.Session`.
Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
including the four core OAuth 2 grants.
Can be used to create authorization urls, fetch tokens and access protected
resources using the :class:`requests.Session` interface you are used to.
- :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
- :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
- :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
- :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant
Note that the only time you will be using Implicit Grant from python is if
you are driving a user agent able to obtain URL fragments.
"""
def __init__(self, client_id=None, client=None, auto_refresh_url=None,
auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
state=None, token_updater=None, **kwargs):
"""Construct a new OAuth 2 client session.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param scope: List of scopes you wish to request access to
:param redirect_uri: Redirect URI you registered as callback
:param token: Token dictionary, must include access_token
and token_type.
:param state: State string used to prevent CSRF. This will be given
when creating the authorization url and must be supplied
when parsing the authorization response.
Can be either a string or a no argument callable.
:auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
this if you wish the client to automatically refresh
your access tokens.
:auto_refresh_kwargs: Extra arguments to pass to the refresh token
endpoint.
:token_updater: Method with one argument, token, to be used to update
your token databse on automatic token refresh. If not
set a TokenUpdated warning will be raised when a token
has been refreshed. This warning will carry the token
in its token argument.
:param kwargs: Arguments to pass to the Session constructor.
"""
super(OAuth2Session, self).__init__(**kwargs)
self.client_id = client_id or client.client_id
self.scope = scope
self.redirect_uri = redirect_uri
self.token = token or {}
self.state = state or generate_token
self._state = state
self.auto_refresh_url = auto_refresh_url
self.auto_refresh_kwargs = auto_refresh_kwargs or {}
self.token_updater = token_updater
self._client = client or WebApplicationClient(client_id, token=token)
self._client._populate_attributes(token or {})
# Allow customizations for non compliant providers through various
# hooks to adjust requests and responses.
self.compliance_hook = {
'access_token_response': set([]),
'refresh_token_response': set([]),
'protected_request': set([]),
}
def new_state(self):
"""Generates a state string to be used in authorizations."""
try:
self._state = self.state()
log.debug('Generated new state %s.', self._state)
except TypeError:
self._state = self.state
log.debug('Re-using previously supplied state %s.', self._state)
return self._state
def authorization_url(self, url, state=None, **kwargs):
"""Form an authorization URL.
:param url: Authorization endpoint url, must be HTTPS.
:param state: An optional state string for CSRF protection. If not
given it will be generated for you.
:param kwargs: Extra parameters to include.
:return: authorization_url, state
"""
state = state or self.new_state()
return self._client.prepare_request_uri(url,
redirect_uri=self.redirect_uri,
scope=self.scope,
state=state,
**kwargs), state
def fetch_token(self, token_url, code=None, authorization_response=None,
body='', auth=None, username=None, password=None, method='POST',
headers=None, verify=True, **kwargs):
"""Generic method for fetching an access token from the token endpoint.
If you are using the MobileApplicationClient you will want to use
token_from_fragment instead of fetch_token.
:param token_url: Token endpoint URL, must use HTTPS.
:param code: Authorization code (used by WebApplicationClients).
:param authorization_response: Authorization response URL, the callback
URL of the request back to you. Used by
WebApplicationClients instead of code.
:param body: Optional application/x-www-form-urlencoded body to add the
include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param username: Username used by LegacyApplicationClients.
:param password: Password used by LegacyApplicationClients.
:param method: The HTTP method used to make the request. Defaults
to POST, but may also be GET. Other methods should
be added as needed.
:param headers: Dict to default request headers with.
:param verify: Verify SSL certificate.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
if not code and authorization_response:
self._client.parse_request_uri_response(authorization_response,
state=self._state)
code = self._client.code
elif not code and isinstance(self._client, WebApplicationClient):
code = self._client.code
if not code:
raise ValueError('Please supply either code or '
'authorization_code parameters.')
body = self._client.prepare_request_body(code=code, body=body,
redirect_uri=self.redirect_uri, username=username,
password=password, **kwargs)
headers = headers or {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
if method.upper() == 'POST':
r = self.post(token_url, data=dict(urldecode(body)),
headers=headers, auth=auth,
verify=verify)
log.debug('Prepared fetch token request body %s', body)
elif method.upper() == 'GET':
# if method is not 'POST', switch body to querystring and GET
r = self.get(token_url, params=dict(urldecode(body)),
headers=headers, auth=auth,
verify=verify)
log.debug('Prepared fetch token request querystring %s', body)
else:
raise ValueError('The method kwarg must be POST or GET.')
log.debug('Request to fetch token completed with status %s.',
r.status_code)
log.debug('Request headers were %s', r.request.headers)
log.debug('Request body was %s', r.request.body)
log.debug('Response headers were %s and content %s.',
r.headers, r.text)
log.debug('Invoking %d token response hooks.',
len(self.compliance_hook['access_token_response']))
for hook in self.compliance_hook['access_token_response']:
log.debug('Invoking hook %s.', hook)
r = hook(r)
self._client.parse_request_body_response(r.text, scope=self.scope)
self.token = self._client.token
log.debug('Obtained token %s.', self.token)
return self.token
def token_from_fragment(self, authorization_response):
"""Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
"""
self._client.parse_request_uri_response(authorization_response,
state=self._state)
self.token = self._client.token
return self.token
def refresh_token(self, token_url, refresh_token=None, body='', auth=None,
**kwargs):
"""Fetch a new access token using a refresh token.
:param token_url: The token endpoint, must be HTTPS.
:param refresh_token: The refresh_token to use.
:param body: Optional application/x-www-form-urlencoded body to add the
include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not token_url:
raise ValueError('No token endpoint set for auto_refresh.')
if not is_secure_transport(token_url):
raise InsecureTransportError()
# Need to nullify token to prevent it from being added to the request
refresh_token = refresh_token or self.token.get('refresh_token')
self.token = {}
log.debug('Adding auto refresh key word arguments %s.',
self.auto_refresh_kwargs)
kwargs.update(self.auto_refresh_kwargs)
body = self._client.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=self.scope, **kwargs)
log.debug('Prepared refresh token request body %s', body)
r = self.post(token_url, data=dict(urldecode(body)), auth=auth)
log.debug('Request to refresh token completed with status %s.',
r.status_code)
log.debug('Response headers were %s and content %s.',
r.headers, r.text)
log.debug('Invoking %d token response hooks.',
len(self.compliance_hook['refresh_token_response']))
for hook in self.compliance_hook['refresh_token_response']:
log.debug('Invoking hook %s.', hook)
r = hook(r)
self.token = self._client.parse_request_body_response(r.text, scope=self.scope)
if not 'refresh_token' in self.token:
log.debug('No new refresh token given. Re-using old.')
self.token['refresh_token'] = refresh_token
return self.token
def request(self, method, url, data=None, headers=None, **kwargs):
    """Intercept all requests and add the OAuth 2 token if present.

    :param method: HTTP method of the request.
    :param url: Target URL; must pass ``is_secure_transport``.
    :param data: Optional request body.
    :param headers: Optional request headers.
    :raises InsecureTransportError: If ``url`` is not a secure transport.
    :raises TokenUpdated: If the token was auto-refreshed but no
        ``token_updater`` callback is configured; carries the new token.
    """
    if not is_secure_transport(url):
        raise InsecureTransportError()
    if self.token:
        # Give registered hooks a chance to rewrite url/headers/body
        # before the token is attached.
        log.debug('Invoking %d protected resource request hooks.',
                  len(self.compliance_hook['protected_request']))
        for hook in self.compliance_hook['protected_request']:
            log.debug('Invoking hook %s.', hook)
            url, headers, data = hook(url, headers, data)

        log.debug('Adding token %s to request.', self.token)
        try:
            url, headers, data = self._client.add_token(url,
                    http_method=method, body=data, headers=headers)
        # Attempt to retrieve and save new access token if expired
        except TokenExpiredError:
            if self.auto_refresh_url:
                log.debug('Auto refresh is set, attempting to refresh at %s.',
                          self.auto_refresh_url)
                token = self.refresh_token(self.auto_refresh_url)
                if self.token_updater:
                    log.debug('Updating token to %s using %s.',
                              token, self.token_updater)
                    self.token_updater(token)
                    # Retry attaching the (now refreshed) token.
                    url, headers, data = self._client.add_token(url,
                            http_method=method, body=data, headers=headers)
                else:
                    # No updater configured: surface the fresh token to the
                    # caller instead of silently persisting it.
                    raise TokenUpdated(token)
            else:
                raise

    log.debug('Requesting url %s using method %s.', url, method)
    log.debug('Supplying headers %s and data %s', headers, data)
    log.debug('Passing through key word arguments %s.', kwargs)
    return super(OAuth2Session, self).request(method, url,
            headers=headers, data=data, **kwargs)
def register_compliance_hook(self, hook_type, hook):
    """Register a hook for request/response tweaking.

    Available hooks are:
        access_token_response invoked before token parsing.
        refresh_token_response invoked before refresh token parsing.
        protected_request invoked before making a request.

    If you find a new hook is needed please send a GitHub PR request
    or open an issue.

    :param hook_type: Name of the hook point to attach to.
    :param hook: Callable invoked at that hook point.
    :raises ValueError: If ``hook_type`` is not a known hook point.
    """
    if hook_type not in self.compliance_hook:
        # Bug fix: ValueError does not %-interpolate extra args the way
        # logging does, so the original raised a tuple-repr message.
        raise ValueError('Hook type %s is not in %s.' %
                         (hook_type, self.compliance_hook))
    self.compliance_hook[hook_type].add(hook)
| 47.182724 | 87 | 0.623715 |
0299b702b90ae551cb69cf100ae411e60e0cc0f1 | 34,917 | py | Python | dis_snek/models/snek/application_commands.py | ShardlessBun/Dis-Snek | fd15b06c531e82a9682a7b139deec0df7b2aa2d0 | [
"MIT"
] | null | null | null | dis_snek/models/snek/application_commands.py | ShardlessBun/Dis-Snek | fd15b06c531e82a9682a7b139deec0df7b2aa2d0 | [
"MIT"
] | null | null | null | dis_snek/models/snek/application_commands.py | ShardlessBun/Dis-Snek | fd15b06c531e82a9682a7b139deec0df7b2aa2d0 | [
"MIT"
] | null | null | null | import asyncio
import inspect
import logging
import re
from enum import IntEnum
from typing import TYPE_CHECKING, Callable, Coroutine, Dict, List, Union, Optional, Any
import attr
import dis_snek.models.discord.channel as channel
from dis_snek.client.const import (
GLOBAL_SCOPE,
CONTEXT_MENU_NAME_LENGTH,
SLASH_OPTION_NAME_LENGTH,
SLASH_CMD_NAME_LENGTH,
SLASH_CMD_MAX_OPTIONS,
SLASH_CMD_MAX_DESC_LENGTH,
MISSING,
logger_name,
Absent,
)
from dis_snek.client.mixins.serialization import DictSerializationMixin
from dis_snek.client.utils.attr_utils import docs
from dis_snek.client.utils.misc_utils import get_parameters
from dis_snek.client.utils.serializer import no_export_meta
from dis_snek.models.discord.enums import ChannelTypes, CommandTypes
from dis_snek.models.discord.role import Role
from dis_snek.models.discord.snowflake import to_snowflake, to_snowflake_list
from dis_snek.models.discord.user import BaseUser
from dis_snek.models.snek.auto_defer import AutoDefer
from dis_snek.models.snek.command import BaseCommand
if TYPE_CHECKING:
from dis_snek.models.discord.snowflake import Snowflake_Type
from dis_snek.models.snek.context import Context
__all__ = [
"OptionTypes",
"PermissionTypes",
"CallbackTypes",
"Permission",
"InteractionCommand",
"ContextMenu",
"SlashCommandChoice",
"SlashCommand",
"ComponentCommand",
"slash_command",
"subcommand",
"context_menu",
"component_callback",
"slash_option",
"slash_permission",
"auto_defer",
"application_commands_to_dict",
"sync_needed",
]
log = logging.getLogger(logger_name)
class OptionTypes(IntEnum):
    """Option types supported by slash commands."""

    SUB_COMMAND = 1
    SUB_COMMAND_GROUP = 2
    STRING = 3
    INTEGER = 4
    BOOLEAN = 5
    USER = 6
    CHANNEL = 7
    ROLE = 8
    MENTIONABLE = 9
    NUMBER = 10

    @classmethod
    def from_type(cls, t: type) -> "OptionTypes":
        """
        Convert data types to their corresponding OptionType.

        parameters:
            t: The datatype to convert

        returns:
            OptionType or None if the type is unsupported
        """
        if issubclass(t, str):
            return cls.STRING
        # Bug fix: bool is a subclass of int, so it must be checked *before*
        # int or `from_type(bool)` would incorrectly resolve to INTEGER.
        if issubclass(t, bool):
            return cls.BOOLEAN
        if issubclass(t, int):
            return cls.INTEGER
        if issubclass(t, BaseUser):
            return cls.USER
        if issubclass(t, channel.BaseChannel):
            return cls.CHANNEL
        if issubclass(t, Role):
            return cls.ROLE
        if issubclass(t, float):
            return cls.NUMBER
        # Unsupported types fall through and implicitly return None.
class PermissionTypes(IntEnum):
    """Types of target supported by the interaction permission."""

    ROLE = 1
    USER = 2

    @classmethod
    def from_type(cls, t: type) -> "PermissionTypes":
        """Map a Role or BaseUser class to its permission target type.

        Returns None for any other type.
        """
        # Table-driven dispatch; checks run in the same order as before.
        for base_cls, member in ((Role, cls.ROLE), (BaseUser, cls.USER)):
            if issubclass(t, base_cls):
                return member
class CallbackTypes(IntEnum):
    """Types of callback supported by interaction response."""

    # The numeric gaps (2, 3) are deliberate; values appear to be wire-level
    # identifiers for Discord interaction callbacks -- confirm against API docs.
    PONG = 1
    CHANNEL_MESSAGE_WITH_SOURCE = 4
    DEFERRED_CHANNEL_MESSAGE_WITH_SOURCE = 5
    DEFERRED_UPDATE_MESSAGE = 6
    UPDATE_MESSAGE = 7
    AUTOCOMPLETE_RESULT = 8
@attr.s(slots=True, hash=True)
class Permission:
    """
    Represents a interaction permission.

    parameters:
        id: The id of the role or user.
        guild_id: The guild this permission belongs to
        type: The type of id (user or role)
        permission: The state of permission. ``True`` to allow, ``False``, to disallow.
    """

    # Snowflake of the targeted role or user.
    id: "Snowflake_Type" = attr.ib(converter=to_snowflake)
    # Guild the permission applies to; marked no_export_meta so it is not serialised.
    guild_id: "Snowflake_Type" = attr.ib(converter=to_snowflake, metadata=no_export_meta)
    type: Union[PermissionTypes, int] = attr.ib(converter=PermissionTypes)
    permission: bool = attr.ib(default=True)

    def to_dict(self) -> dict:
        """
        Convert this object into a dict ready for discord.

        returns:
            Representation of this object
        """
        data = attr.asdict(self)
        # guild_id is client-side only and must not be sent to Discord;
        # it is dropped manually here since attr.asdict includes every field.
        data.pop("guild_id", None)
        # Snowflakes are serialised as strings -- presumably to avoid JSON
        # integer precision loss; confirm against the Discord API docs.
        data["id"] = str(data["id"])
        return data
@attr.s(slots=True, kw_only=True, on_setattr=[attr.setters.convert, attr.setters.validate])
class InteractionCommand(BaseCommand):
    """
    Represents a discord abstract interaction command.

    parameters:
        scope: Denotes whether its global or for specific guild.
        default_permission: Is this command available to all users?
        permissions: Map of guild id and its respective list of permissions to apply.
        cmd_id: The id of this command given by discord.
        callback: The coroutine to callback when this interaction is received.
    """

    name: str = attr.ib(metadata=docs("1-32 character name") | no_export_meta)
    scopes: List["Snowflake_Type"] = attr.ib(
        default=[GLOBAL_SCOPE],
        converter=to_snowflake_list,
        metadata=docs("The scopes of this interaction. Global or guild ids") | no_export_meta,
    )
    default_permission: bool = attr.ib(
        default=True, metadata=docs("whether this command is enabled by default when the app is added to a guild")
    )
    permissions: Optional[List[Union[Permission, Dict]]] = attr.ib(
        factory=dict, metadata=docs("The permissions of this interaction")
    )
    cmd_id: Dict[str, "Snowflake_Type"] = attr.ib(
        factory=dict, metadata=docs("The unique IDs of this commands") | no_export_meta
    )  # scope: cmd_id
    callback: Callable[..., Coroutine] = attr.ib(
        default=None, metadata=docs("The coroutine to call when this interaction is received") | no_export_meta
    )
    auto_defer: "AutoDefer" = attr.ib(
        default=MISSING,
        metadata=docs("A system to automatically defer this command after a set duration") | no_export_meta,
    )

    def __attrs_post_init__(self) -> None:
        # Adopt an auto_defer configuration attached to the callback (set by
        # the @auto_defer decorator) before the base post-init runs.
        if self.callback is not None:
            if hasattr(self.callback, "auto_defer"):
                self.auto_defer = self.callback.auto_defer
        super().__attrs_post_init__()

    @property
    def resolved_name(self) -> str:
        """A representation of this interaction's name."""
        return self.name

    def get_cmd_id(self, scope: "Snowflake_Type") -> "Snowflake_Type":
        """Return the command id registered for ``scope``, falling back to the global id."""
        return self.cmd_id.get(scope, self.cmd_id.get(GLOBAL_SCOPE, None))

    @property
    def is_subcommand(self) -> bool:
        # Base interaction commands are never subcommands; SlashCommand overrides this.
        return False

    async def _permission_enforcer(self, ctx: "Context") -> bool:
        """A check that enforces Discord permissions."""
        # I wish this wasn't needed, but unfortunately Discord permissions cant be trusted to actually prevent usage
        for perm in self.permissions or []:
            if perm.type == PermissionTypes.ROLE:
                if ctx.author.has_role(perm.id):
                    if perm.permission is True:
                        return True
                    # Explicit deny only applies when the command is otherwise allowed.
                    elif self.default_permission is True:
                        return False
            elif perm.type == PermissionTypes.USER:
                if ctx.author.id == perm.id:
                    if perm.permission is True:
                        return True
                    elif self.default_permission is True:
                        return False
        # No matching override: fall back to the command-wide default.
        return self.default_permission
@attr.s(slots=True, kw_only=True, on_setattr=[attr.setters.convert, attr.setters.validate])
class ContextMenu(InteractionCommand):
    """
    Represents a discord context menu.

    parameters:
        name: The name of this entry.
        type: The type of entry (user or message).
    """

    name: str = attr.ib(metadata=docs("1-32 character name"))
    type: CommandTypes = attr.ib(metadata=docs("The type of command, defaults to 1 if not specified"))

    @name.validator
    def _name_validator(self, attribute: str, value: str) -> None:
        # Discord limits context-menu names to CONTEXT_MENU_NAME_LENGTH characters.
        if not 1 <= len(value) <= CONTEXT_MENU_NAME_LENGTH:
            raise ValueError("Context Menu name attribute must be between 1 and 32 characters")

    @type.validator
    def _type_validator(self, attribute: str, value: int) -> None:
        if not isinstance(value, CommandTypes):
            # Raw ints are accepted as long as they map onto a known CommandTypes value.
            if value not in CommandTypes.__members__.values():
                raise ValueError("Context Menu type not recognised, please consult the docs.")
        elif value == CommandTypes.CHAT_INPUT:
            # CHAT_INPUT is the slash-command type and has its own decorator.
            raise ValueError(
                "The CHAT_INPUT type is basically slash commands. Please use the @slash_command() " "decorator instead."
            )
@attr.s(slots=True)
class SlashCommandChoice(DictSerializationMixin):
    """
    Represents a discord slash command choice.

    parameters:
        name: The name the user will see
        value: The data sent to your code when this choice is used
    """

    # Display label shown in the Discord client.
    name: str = attr.ib()
    # Payload delivered to the command callback when this choice is selected.
    value: Union[str, int, float] = attr.ib()
@attr.s(slots=True, on_setattr=[attr.setters.convert, attr.setters.validate])
class SlashCommandOption(DictSerializationMixin):
    """
    Represents a discord slash command option.

    parameters:
        name: The name of this option
        type: The type of option
        description: The description of this option
        required: "This option must be filled to use the command"
        choices: A list of choices the user has to pick between
        channel_types: The channel types permitted. The option needs to be a channel
        min_value: The minimum value permitted. The option needs to be an integer or float
        max_value: The maximum value permitted. The option needs to be an integer or float
    """

    name: str = attr.ib()
    type: Union[OptionTypes, int] = attr.ib()
    description: str = attr.ib(default="No Description Set")
    required: bool = attr.ib(default=True)
    autocomplete: bool = attr.ib(default=False)
    choices: List[Union[SlashCommandChoice, Dict]] = attr.ib(factory=list)
    channel_types: Optional[list[Union[ChannelTypes, int]]] = attr.ib(default=None)
    min_value: Optional[float] = attr.ib(default=None)
    max_value: Optional[float] = attr.ib(default=None)

    @name.validator
    def _name_validator(self, attribute: str, value: str) -> None:
        # Names must be lowercase word characters/hyphens, within the length cap.
        if not re.match(rf"^[\w-]{{1,{SLASH_CMD_NAME_LENGTH}}}$", value) or value != value.lower():
            raise ValueError(
                f"Options names must be lower case and match this regex: ^[\w-]{1, {SLASH_CMD_NAME_LENGTH} }$"  # noqa: W605
            )

    @description.validator
    def _description_validator(self, attribute: str, value: str) -> None:
        if not 1 <= len(value) <= SLASH_OPTION_NAME_LENGTH:
            raise ValueError("Options must be between 1 and 100 characters long")

    @type.validator
    def _type_validator(self, attribute: str, value: int) -> None:
        # Subcommand types are modelled as SlashCommand objects, not options.
        if value == OptionTypes.SUB_COMMAND or value == OptionTypes.SUB_COMMAND_GROUP:
            raise ValueError(
                "Options cannot be SUB_COMMAND or SUB_COMMAND_GROUP. If you want to use subcommands, "
                "see the @sub_command() decorator."
            )

    @channel_types.validator
    def _channel_types_validator(self, attribute: str, value: Optional[list[OptionTypes]]) -> None:
        if value is not None:
            if self.type != OptionTypes.CHANNEL:
                raise ValueError("The option needs to be CHANNEL to use this")

            # Accept either ChannelTypes members or their raw int values.
            allowed_int = [channel_type.value for channel_type in ChannelTypes]
            for item in value:
                if (item not in allowed_int) and (item not in ChannelTypes):
                    raise ValueError(f"{value} is not allowed here")

    @min_value.validator
    def _min_value_validator(self, attribute: str, value: Optional[float]) -> None:
        if value is not None:
            if self.type != OptionTypes.INTEGER and self.type != OptionTypes.NUMBER:
                raise ValueError("`min_value` can only be supplied with int or float options")

            if self.type == OptionTypes.INTEGER:
                if isinstance(value, float):
                    raise ValueError("`min_value` needs to be an int in an int option")

            if self.max_value is not None and self.min_value is not None:
                if self.max_value < self.min_value:
                    raise ValueError("`min_value` needs to be <= than `max_value`")

    @max_value.validator
    def _max_value_validator(self, attribute: str, value: Optional[float]) -> None:
        if value is not None:
            if self.type != OptionTypes.INTEGER and self.type != OptionTypes.NUMBER:
                raise ValueError("`max_value` can only be supplied with int or float options")

            if self.type == OptionTypes.INTEGER:
                if isinstance(value, float):
                    raise ValueError("`max_value` needs to be an int in an int option")

            # NOTE(review): unlike _min_value_validator (which uses `is not None`),
            # this uses truthiness, so a bound of 0 skips the ordering check here.
            # Confirm whether that asymmetry is intended.
            if self.max_value and self.min_value:
                if self.max_value < self.min_value:
                    raise ValueError("`min_value` needs to be <= than `max_value`")
@attr.s(slots=True, kw_only=True, on_setattr=[attr.setters.convert, attr.setters.validate])
class SlashCommand(InteractionCommand):
    """A Discord chat-input (slash) command, optionally a subcommand of a base command."""

    name: str = attr.ib()
    description: str = attr.ib("No Description Set")
    # Subcommand-group fields; excluded from serialisation (no_export_meta).
    group_name: str = attr.ib(default=None, metadata=no_export_meta)
    group_description: str = attr.ib(default="No Description Set", metadata=no_export_meta)
    sub_cmd_name: str = attr.ib(default=None, metadata=no_export_meta)
    sub_cmd_description: str = attr.ib(default="No Description Set", metadata=no_export_meta)
    options: List[Union[SlashCommandOption, Dict]] = attr.ib(factory=list)
    # option name -> autocomplete coroutine, populated via the autocomplete() decorator.
    autocomplete_callbacks: dict = attr.ib(factory=dict, metadata=no_export_meta)

    @property
    def resolved_name(self) -> str:
        """Full name in the form ``base [group] [subcommand]``."""
        return f"{self.name}{f' {self.group_name}' if self.group_name else ''}{f' {self.sub_cmd_name}' if self.sub_cmd_name else ''}"

    @property
    def is_subcommand(self) -> bool:
        return self.sub_cmd_name is not None

    def __attrs_post_init__(self) -> None:
        # Collect options declared via parameter annotations on the callback;
        # the annotation object itself is the option, named after the parameter.
        # NOTE(review): get_parameters is called even when callback is None --
        # presumably it tolerates None; confirm.
        params = get_parameters(self.callback)
        for name, val in params.items():
            if val.annotation and isinstance(val.annotation, SlashCommandOption):
                if not self.options:
                    self.options = []
                val.annotation.name = name
                self.options.append(val.annotation)

        if self.callback is not None:
            # Merge options/permissions attached by stacked decorators.
            if hasattr(self.callback, "options"):
                if not self.options:
                    self.options = []
                self.options += self.callback.options
            if hasattr(self.callback, "permissions"):
                self.permissions = self.callback.permissions
        super().__attrs_post_init__()

    def to_dict(self) -> dict:
        data = super().to_dict()
        if self.is_subcommand:
            # Subcommands serialise under their own name/description and
            # carry no top-level permission fields.
            data["name"] = self.sub_cmd_name
            data["description"] = self.sub_cmd_description
            data.pop("default_permission", None)
            data.pop("permissions", None)
        return data

    @name.validator
    @group_name.validator
    @sub_cmd_name.validator
    def name_validator(self, attribute: str, value: str) -> None:
        if value:
            if not re.match(rf"^[\w-]{{1,{SLASH_CMD_NAME_LENGTH}}}$", value) or value != value.lower():
                raise ValueError(
                    f"Slash Command names must be lower case and match this regex: ^[\w-]{1, {SLASH_CMD_NAME_LENGTH} }$"  # noqa: W605
                )

    @description.validator
    @group_description.validator
    @sub_cmd_description.validator
    def description_validator(self, attribute: str, value: str) -> None:
        if not 1 <= len(value) <= SLASH_CMD_MAX_DESC_LENGTH:
            raise ValueError(f"Description must be between 1 and {SLASH_CMD_MAX_DESC_LENGTH} characters long")

    @options.validator
    def options_validator(self, attribute: str, value: List) -> None:
        if value:
            if isinstance(value, list):
                if len(value) > SLASH_CMD_MAX_OPTIONS:
                    raise ValueError(f"Slash commands can only hold {SLASH_CMD_MAX_OPTIONS} options")
                # Required options must precede optional ones; compare against
                # a required-first sort to detect violations.
                if value != sorted(
                    value,
                    key=lambda x: x.required if isinstance(x, SlashCommandOption) else x["required"],
                    reverse=True,
                ):
                    raise ValueError("Required options must go before optional options")
            else:
                raise TypeError("Options attribute must be either None or a list of options")

    def autocomplete(self, option_name: str) -> Callable[..., Coroutine]:
        """A decorator to declare a coroutine as an option autocomplete."""

        def wrapper(call: Callable[..., Coroutine]) -> Callable[..., Coroutine]:
            if not asyncio.iscoroutinefunction(call):
                raise TypeError("autocomplete must be coroutine")
            self.autocomplete_callbacks[option_name] = call

            # automatically set the option's autocomplete attribute to True
            for opt in self.options:
                if isinstance(opt, dict) and opt["name"] == option_name:
                    opt["autocomplete"] = True
                elif isinstance(opt, SlashCommandOption) and opt.name == option_name:
                    opt.autocomplete = True

            return call

        # Normalise before returning so the closure above sees the lowered name.
        option_name = option_name.lower()
        return wrapper

    def subcommand(
        self,
        sub_cmd_name: str,
        group_name: str = None,
        group_description: str = "No Description Set",
        sub_cmd_description: Absent[str] = MISSING,
        options: List[Union[SlashCommandOption, Dict]] = None,
    ) -> Callable[..., "SlashCommand"]:
        """A decorator declaring a coroutine as a subcommand of this command."""

        def wrapper(call: Callable[..., Coroutine]) -> "SlashCommand":
            if not asyncio.iscoroutinefunction(call):
                raise TypeError("Subcommand must be coroutine")

            _description = sub_cmd_description
            if _description is MISSING:
                # Fall back to the coroutine's docstring for the description.
                _description = call.__doc__ if call.__doc__ else "No Description Set"

            return SlashCommand(
                name=self.name,
                description=self.description,
                group_name=group_name,
                group_description=group_description,
                sub_cmd_name=sub_cmd_name,
                sub_cmd_description=_description,
                options=options,
                callback=call,
            )

        return wrapper
@attr.s(slots=True, kw_only=True, on_setattr=[attr.setters.convert, attr.setters.validate])
class ComponentCommand(InteractionCommand):
    """An interaction command invoked by message components rather than chat input."""

    # right now this adds no extra functionality, but for future dev ive implemented it
    # Custom IDs of the components this callback listens for.
    listeners: list[str] = attr.ib(factory=list)
##############
# Decorators #
##############
def slash_command(
    name: str,
    description: Absent[str] = MISSING,
    scopes: Absent[List["Snowflake_Type"]] = MISSING,
    options: Optional[List[Union[SlashCommandOption, Dict]]] = None,
    default_permission: bool = True,
    permissions: Optional[List[Union[Permission, Dict]]] = None,
    sub_cmd_name: str = None,
    group_name: str = None,
    sub_cmd_description: str = "No Description Set",
    group_description: str = "No Description Set",
) -> Callable[[Coroutine], SlashCommand]:
    """
    A decorator to declare a coroutine as a slash command.

    note:
        While the base and group descriptions arent visible in the discord client, currently.
        We strongly advise defining them anyway, if you're using subcommands, as Discord has said they will be visible in
        one of the future ui updates.

    parameters:
        name: 1-32 character name of the command
        description: 1-100 character description of the command
        scope: The scope this command exists within
        options: The parameters for the command, max 25
        default_permission: Whether the command is enabled by default when the app is added to a guild
        permissions: The roles or users who can use this command
        sub_cmd_name: 1-32 character name of the subcommand
        sub_cmd_description: 1-100 character description of the subcommand
        group_name: 1-32 character name of the group
        group_description: 1-100 character description of the group

    returns:
        SlashCommand Object
    """

    def wrapper(func) -> SlashCommand:
        if not asyncio.iscoroutinefunction(func):
            raise ValueError("Commands must be coroutines")

        _description = description
        if _description is MISSING:
            # Default the description to the coroutine's docstring.
            _description = func.__doc__ if func.__doc__ else "No Description Set"

        cmd = SlashCommand(
            name=name,
            group_name=group_name,
            group_description=group_description,
            sub_cmd_name=sub_cmd_name,
            sub_cmd_description=sub_cmd_description,
            description=_description,
            scopes=scopes if scopes else [GLOBAL_SCOPE],
            default_permission=default_permission,
            permissions=permissions or {},
            callback=func,
            options=options,
        )
        return cmd

    return wrapper
def subcommand(
    base: str,
    *,
    subcommand_group: Optional[str] = None,
    name: Optional[str] = None,
    description: Absent[str] = MISSING,
    base_description: Optional[str] = None,
    base_desc: Optional[str] = None,
    base_default_permission: bool = True,
    base_permissions: Optional[dict] = None,
    subcommand_group_description: Optional[str] = None,
    sub_group_desc: Optional[str] = None,
    scopes: List["Snowflake_Type"] = None,
    options: List[dict] = None,
) -> Callable[[Coroutine], SlashCommand]:
    """
    A decorator specifically tailored for creating subcommands.

    Args:
        base: The name of the base command
        subcommand_group: The name of the subcommand group, if any.
        name: The name of the subcommand, defaults to the name of the coroutine.
        description: The description of the subcommand
        base_description: The description of the base command
        base_desc: An alias of `base_description`
        base_default_permission: If users have permission to run the command by default when no permissions have been set
        base_permissions: Permissions of the command
        subcommand_group_description: Description of the subcommand group
        sub_group_desc: An alias for `subcommand_group_description`
        scopes: The scopes of which this command is available, defaults to GLOBAL_SCOPE
        options: The options for this command

    Returns:
        A SlashCommand object
    """

    def wrapper(func) -> SlashCommand:
        if not asyncio.iscoroutinefunction(func):
            raise ValueError("Commands must be coroutines")

        _description = description
        if _description is MISSING:
            _description = func.__doc__ if func.__doc__ else "No Description Set"

        # NOTE(review): the docstring says `name` defaults to the coroutine's
        # name, but nothing here substitutes func.__name__ when name is None.
        # Confirm whether that default is applied downstream.
        cmd = SlashCommand(
            name=base,
            description=(base_description or base_desc) or "No Description Set",
            group_name=subcommand_group,
            group_description=(subcommand_group_description or sub_group_desc) or "No Description Set",
            sub_cmd_name=name,
            sub_cmd_description=_description,
            default_permission=base_default_permission,
            permissions=base_permissions or {},
            scopes=scopes if scopes else [GLOBAL_SCOPE],
            callback=func,
            options=options,
        )
        return cmd

    return wrapper
def context_menu(
    name: str,
    context_type: "CommandTypes",
    scopes: Absent[List["Snowflake_Type"]] = MISSING,
    default_permission: bool = True,
    permissions: Optional[List[Union[Permission, Dict]]] = None,
) -> Callable[[Coroutine], ContextMenu]:
    """
    A decorator to declare a coroutine as a Context Menu.

    parameters:
        name: 1-32 character name of the context menu
        context_type: The type of context menu
        scope: The scope this command exists within
        default_permission: Whether the menu is enabled by default when the app is added to a guild
        permissions: The roles or users who can use this menu

    returns:
        ContextMenu object
    """

    def wrapper(func) -> ContextMenu:
        if not asyncio.iscoroutinefunction(func):
            raise ValueError("Commands must be coroutines")

        perm = permissions or {}
        # Merge permissions attached by a stacked @slash_permission decorator.
        # NOTE(review): `perm` may be a list (per the parameter annotation),
        # which has no .update(); confirm the expected container type here.
        if hasattr(func, "permissions"):
            if perm:
                perm.update(func.permissions)
            else:
                perm = func.permissions

        cmd = ContextMenu(
            name=name,
            type=context_type,
            scopes=scopes if scopes else [GLOBAL_SCOPE],
            default_permission=default_permission,
            permissions=perm,
            callback=func,
        )
        return cmd

    return wrapper
def component_callback(*custom_id: str) -> Callable[[Coroutine], ComponentCommand]:
    """
    Register a coroutine as a component callback.

    Component callbacks work the same way as commands, just using components as a way of invoking, instead of messages.
    Your callback will be given a single argument, `ComponentContext`

    Args:
        custom_id: The custom ID of the component to wait for
    """
    # Accept a mixture of generators and plain strings: flatten everything
    # into one list of ids before the wrapper closes over `custom_id`.
    flattened = []
    for entry in custom_id:
        if inspect.isgenerator(entry):
            flattened.extend(entry)
        else:
            flattened.append(entry)
    custom_id = flattened

    def wrapper(func) -> ComponentCommand:
        if not asyncio.iscoroutinefunction(func):
            raise ValueError("Commands must be coroutines")

        return ComponentCommand(name=f"ComponentCallback::{custom_id}", callback=func, listeners=custom_id)

    return wrapper
def slash_option(
    name: str,
    description: str,
    opt_type: Union[OptionTypes, int],
    required: bool = False,
    autocomplete: bool = False,
    choices: List[Union[SlashCommandChoice, dict]] = None,
    channel_types: Optional[list[Union[ChannelTypes, int]]] = None,
    min_value: Optional[float] = None,
    max_value: Optional[float] = None,
) -> Any:
    r"""
    A decorator to add an option to a slash command.

    parameters:
        name: 1-32 lowercase character name matching ^[\w-]{1,32}$
        opt_type: The type of option
        description: 1-100 character description of option
        required: If the parameter is required or optional--default false
        choices: A list of choices the user has to pick between (max 25)
        channel_types: The channel types permitted. The option needs to be a channel
        min_value: The minimum value permitted. The option needs to be an integer or float
        max_value: The maximum value permitted. The option needs to be an integer or float
    """

    def wrapper(func: Coroutine) -> Coroutine:
        # A command object (which carries cmd_id) means this decorator sits
        # above @slash_command -- the wrong stacking order.
        if hasattr(func, "cmd_id"):
            raise Exception("slash_option decorators must be positioned under a slash_command decorator")

        option = SlashCommandOption(
            name=name,
            type=opt_type,
            description=description,
            required=required,
            autocomplete=autocomplete,
            choices=choices if choices else [],
            channel_types=channel_types,
            min_value=min_value,
            max_value=max_value,
        )

        if not hasattr(func, "options"):
            func.options = []
        # Decorators apply bottom-up, so prepending keeps the options in the
        # order they are written top-down in source.
        func.options.insert(0, option)
        return func

    return wrapper
def slash_permission(*permission: Union[Permission, Dict]) -> Any:
    """
    A decorator attaching guild permissions to a slash command or context menu.

    parameters:
        *permission: The permissions to apply to this command
    """

    def wrapper(func: Coroutine) -> Coroutine:
        # A command object (which carries cmd_id) means this decorator sits
        # above @slash_command -- the wrong stacking order.
        if hasattr(func, "cmd_id"):
            raise Exception("slash_permission decorators must be positioned under a slash_command decorator")

        if not hasattr(func, "permissions"):
            func.permissions = []
        # Accumulate onto whatever earlier decorators already attached.
        func.permissions += [*permission]
        return func

    return wrapper
def auto_defer(ephemeral: bool = False, time_until_defer: float = 0.0) -> Callable[[Coroutine], Coroutine]:
    """
    Mark a coroutine so its application command is deferred automatically.

    Args:
        ephemeral: Should the command be deferred as ephemeral
        time_until_defer: How long to wait before deferring automatically
    """

    def wrapper(func: Coroutine) -> Coroutine:
        # A command object (which carries cmd_id) means this decorator sits
        # above @slash_command -- the wrong stacking order.
        if hasattr(func, "cmd_id"):
            raise Exception("auto_defer decorators must be positioned under a slash_command decorator")
        defer_settings = AutoDefer(enabled=True, ephemeral=ephemeral, time_until_defer=time_until_defer)
        func.auto_defer = defer_settings
        return func

    return wrapper
def application_commands_to_dict(commands: Dict["Snowflake_Type", Dict[str, InteractionCommand]]) -> dict:
    """
    Convert the command list into a format that would be accepted by discord.

    `Snake.interactions` should be the variable passed to this
    """
    cmd_bases = {}  # {cmd_base: [commands]}
    """A store of commands organised by their base command"""
    output = {}
    """The output dictionary"""

    def squash_subcommand(subcommands: List) -> Dict:
        # Fold a list of subcommands sharing one base into a single payload:
        # groups become SUB_COMMAND_GROUP options, plain subcommands SUB_COMMAND.
        output_data = {}
        groups = {}
        sub_cmds = []
        for subcommand in subcommands:
            if not output_data:
                # First subcommand seeds the base command's top-level fields.
                output_data = {
                    "name": subcommand.name,
                    "description": subcommand.description,
                    "options": [],
                    "permissions": [s.to_dict() if not isinstance(s, dict) else s for s in subcommand.permissions],
                    "default_permission": subcommand.default_permission,
                }
            if subcommand.group_name:
                if subcommand.group_name not in groups:
                    groups[subcommand.group_name] = {
                        "name": subcommand.group_name,
                        "description": subcommand.group_description,
                        "type": int(OptionTypes.SUB_COMMAND_GROUP),
                        "options": [],
                    }
                groups[subcommand.group_name]["options"].append(
                    subcommand.to_dict() | {"type": int(OptionTypes.SUB_COMMAND)}
                )
            elif subcommand.is_subcommand:
                sub_cmds.append(subcommand.to_dict() | {"type": int(OptionTypes.SUB_COMMAND)})
        options = list(groups.values()) + sub_cmds
        output_data["options"] = options
        return output_data

    # Bucket every command (across all scopes) by its base name, de-duplicated.
    for _scope, cmds in commands.items():
        for cmd in cmds.values():
            if cmd.name not in cmd_bases:
                cmd_bases[cmd.name] = [cmd]
                continue
            if cmd not in cmd_bases[cmd.name]:
                cmd_bases[cmd.name].append(cmd)

    for cmd_list in cmd_bases.values():
        if any(c.is_subcommand for c in cmd_list):
            # validate all commands share required attributes
            scopes: list[Snowflake_Type] = list({s for c in cmd_list for s in c.scopes})
            permissions: list = list({d for c in cmd_list for d in c.permissions})
            base_description = next(
                (c.description for c in cmd_list if c.description is not None), "No Description Set"
            )
            if not all(c.description in (base_description, "No Description Set") for c in cmd_list):
                log.warning(
                    f"Conflicting descriptions found in `{cmd_list[0].name}` subcommands; `{base_description}` will be used"
                )
            if not all(c.default_permission == cmd_list[0].default_permission for c in cmd_list):
                raise ValueError(f"Conflicting `default_permission` values found in `{cmd_list[0].name}`")
            # Normalise every sibling to the shared attributes before squashing.
            for cmd in cmd_list:
                cmd.scopes = list(scopes)
                cmd.permissions = permissions
                cmd.description = base_description
            # end validation of attributes
            cmd_data = squash_subcommand(cmd_list)
        else:
            scopes = cmd_list[0].scopes
            cmd_data = cmd_list[0].to_dict()
        for s in scopes:
            if s not in output:
                output[s] = [cmd_data]
                continue
            output[s].append(cmd_data)

    return output
def _compare_options(local_opt_list: dict, remote_opt_list: dict):
    # Return True when the two option lists are equivalent for sync purposes.
    if local_opt_list != remote_opt_list:
        if len(local_opt_list) != len(remote_opt_list):
            return False
        for i in range(len(local_opt_list)):
            local_option = local_opt_list[i]
            remote_option = remote_opt_list[i]
            if local_option["type"] == remote_option["type"]:
                if local_option["type"] in (OptionTypes.SUB_COMMAND_GROUP, OptionTypes.SUB_COMMAND):
                    # Recurse into nested subcommand / group options.
                    if not _compare_options(local_option.get("options", []), remote_option.get("options", [])):
                        return False
                else:
                    # NOTE(review): min_value/max_value/channel_types are not
                    # compared here, so changes to those fields alone will not
                    # trigger a sync -- confirm whether that is intended.
                    if (
                        local_option["name"] != remote_option["name"]
                        or local_option["description"] != remote_option["description"]
                        or local_option["required"] != remote_option.get("required", False)
                        or local_option["autocomplete"] != remote_option.get("autocomplete", False)
                        or local_option["choices"] != remote_option.get("choices", [])
                    ):
                        return False
            else:
                return False
    return True
def sync_needed(local_cmd: dict, remote_cmd: Optional[dict] = None) -> bool:
    """
    Compares a local application command to its remote counterpart to determine if a sync is required.

    Args:
        local_cmd: The local json representation of the command
        remote_cmd: The json representation of the command from Discord

    Returns:
        Boolean indicating if a sync is needed
    """
    if not remote_cmd:
        # No remote version, command must be new
        return True

    if (
        local_cmd["name"] != remote_cmd["name"]
        or local_cmd.get("description", "") != remote_cmd.get("description", "")
        or local_cmd["default_permission"] != remote_cmd["default_permission"]
    ):
        # basic comparison of attributes
        return True

    # Only chat-input commands carry an options tree worth diffing.
    if remote_cmd["type"] == CommandTypes.CHAT_INPUT:
        try:
            if not _compare_options(local_cmd["options"], remote_cmd["options"]):
                # options are not the same, sync needed
                return True
        except KeyError:
            # One side has options and the other does not -> out of sync.
            if "options" in local_cmd or "options" in remote_cmd:
                return True

    return False
| 36.793467 | 134 | 0.63482 |
e0fbe8e2486effc235dfc7636d289fd16a8fef33 | 96,271 | py | Python | tensorflow/lite/python/lite.py | henriwoodcock/tensorflow | d9ad2a9f62ada961b26d09c8f4856cb0c94d9f8a | [
"Apache-2.0"
] | null | null | null | tensorflow/lite/python/lite.py | henriwoodcock/tensorflow | d9ad2a9f62ada961b26d09c8f4856cb0c94d9f8a | [
"Apache-2.0"
] | null | null | null | tensorflow/lite/python/lite.py | henriwoodcock/tensorflow | d9ad2a9f62ada961b26d09c8f4856cb0c94d9f8a | [
"Apache-2.0"
] | 1 | 2021-04-20T18:26:18.000Z | 2021-04-20T18:26:18.000Z | # Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import pprint
import shutil
import tempfile
import warnings
from absl import logging
import six
from six import PY2
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.experimental.examples.lstm.rnn import dynamic_rnn # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TFLiteLSTMCell # pylint: disable=unused-import
from tensorflow.lite.experimental.examples.lstm.rnn_cell import TfLiteRNNCell # pylint: disable=unused-import
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op # pylint: disable=unused-import
from tensorflow.lite.experimental.tensorboard.ops_util import get_potentially_supported_ops # pylint: disable=unused-import
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert import convert_saved_model as _convert_saved_model
from tensorflow.lite.python.convert import ConverterError # pylint: disable=unused-import
from tensorflow.lite.python.convert import mlir_quantize as _mlir_quantize
from tensorflow.lite.python.convert import mlir_sparsify as _mlir_sparsify
from tensorflow.lite.python.convert import OpsSet
from tensorflow.lite.python.convert import toco_convert # pylint: disable=unused-import
from tensorflow.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.lite.python.interpreter import load_delegate # pylint: disable=unused-import
from tensorflow.lite.python.interpreter import OpResolverType # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import is_ophint_converted as _is_ophint_converted
from tensorflow.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.lite.python.util import build_debug_info_func as _build_debug_info_func
from tensorflow.lite.python.util import convert_debug_info_func as _convert_debug_info_func
from tensorflow.lite.python.util import freeze_graph as _freeze_graph
from tensorflow.lite.python.util import get_debug_info as _get_debug_info
from tensorflow.lite.python.util import get_grappler_config as _get_grappler_config
from tensorflow.lite.python.util import get_tensor_name as _get_tensor_name
from tensorflow.lite.python.util import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.lite.python.util import get_tf_type_name as _get_tf_type_name
from tensorflow.lite.python.util import is_frozen_graph as _is_frozen_graph
from tensorflow.lite.python.util import model_input_signature as _model_input_signature
from tensorflow.lite.python.util import modify_model_io_type as _modify_model_io_type
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tensorflow.lite.python.util import set_tensor_shapes as _set_tensor_shapes
from tensorflow.lite.python.util import trace_model_call as _trace_model_call
from tensorflow.python.client import session as _session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function as _def_function
from tensorflow.python.eager import function as _function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import loader_impl as _loader_impl
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
from tensorflow.python.saved_model.load import load as _load
from tensorflow.python.saved_model.loader_impl import parse_saved_model_with_debug_info as _parse_saved_model_with_debug_info
from tensorflow.python.util import deprecation as _deprecation
from tensorflow.python.util import keras_deps
from tensorflow.python.util.tf_export import tf_export as _tf_export
# pylint: disable=g-import-not-at-top
try:
from tensorflow.lite.python import metrics_portable as metrics
except ImportError:
from tensorflow.lite.python import metrics_nonportable as metrics
# pylint: enable=g-import-not-at-top
@_tf_export("lite.Optimize")
class Optimize(enum.Enum):
  """Enum defining the optimizations to apply when generating a tflite model.

  DEFAULT
    Default optimization strategy that quantizes model weights. Enhanced
    optimizations are gained by providing a representative dataset that
    quantizes biases and activations as well.
    Converter will do its best to reduce size and latency, while minimizing
    the loss in accuracy.

  OPTIMIZE_FOR_SIZE
    Deprecated. Does the same as DEFAULT.

  OPTIMIZE_FOR_LATENCY
    Deprecated. Does the same as DEFAULT.

  EXPERIMENTAL_SPARSITY
    Experimental flag, subject to change.

    Enable optimization by taking advantage of the sparse model weights
    trained with pruning.

    The converter will inspect the sparsity pattern of the model weights and
    do its best to improve size and latency.
    The flag can be used alone to optimize float32 models with sparse weights.
    It can also be used together with the DEFAULT optimization mode to
    optimize quantized models with sparse weights.
  """

  # Weight quantization by default; calibration data (representative_dataset)
  # additionally enables bias/activation quantization.
  DEFAULT = "DEFAULT"

  # Deprecated alias of DEFAULT, kept for backward compatibility.
  OPTIMIZE_FOR_SIZE = "OPTIMIZE_FOR_SIZE"

  # Deprecated alias of DEFAULT, kept for backward compatibility.
  OPTIMIZE_FOR_LATENCY = "OPTIMIZE_FOR_LATENCY"

  # Experimental: exploit pruned (sparse) weights to improve size/latency.
  # Usable alone (float32 sparse models) or combined with DEFAULT.
  # TODO(b/161560631): Add log message when this optimization is applied.
  EXPERIMENTAL_SPARSITY = "EXPERIMENTAL_SPARSITY"

  def __str__(self):
    # All member values are strings, so expose the raw value directly.
    return self.value
@_tf_export("lite.RepresentativeDataset")
class RepresentativeDataset(object):
  """Representative dataset used to optimize the model.

  This is a generator function that provides a small dataset to calibrate or
  estimate the range, i.e, (min, max) of all floating-point arrays in the model
  (such as model input, activation outputs of intermediate layers, and model
  output) for quantization. Usually, this is a small subset of a few hundred
  samples randomly chosen, in no particular order, from the training or
  evaluation dataset.
  """

  def __init__(self, input_gen):
    """Creates a representative dataset.

    Args:
      input_gen: A generator function that generates input samples for the
        model and has the same order, type and shape as the inputs to the model.
        Usually, this is a small subset of a few hundred samples randomly
        chosen, in no particular order, from the training or evaluation dataset.
    """
    # Thin wrapper: simply holds on to the user-supplied generator function.
    self.input_gen = input_gen
@_tf_export("lite.TargetSpec")
class TargetSpec(object):
  """Specification of target device used to optimize the model.

  Attributes:
    supported_ops: Experimental flag, subject to change. Set of `tf.lite.OpsSet`
      options, where each option represents a set of operators supported by the
      target device. (default {tf.lite.OpsSet.TFLITE_BUILTINS}))
    supported_types: Set of `tf.dtypes.DType` data types supported on the target
      device. If initialized, optimization might be driven by the smallest type
      in this set. (default set())
    experimental_select_user_tf_ops: Experimental flag, subject to change. Set
      of user's TensorFlow operators' names that are required in the TensorFlow
      Lite runtime. These ops will be exported as select TensorFlow ops in the
      model (in conjunction with the tf.lite.OpsSet.SELECT_TF_OPS flag). This is
      an advanced feature that should only be used if the client is using TF ops
      that may not be linked in by default with the TF ops that are provided
      when using the SELECT_TF_OPS path. The client is responsible for linking
      these ops into the target runtime.
  """

  def __init__(self,
               supported_ops=None,
               supported_types=None,
               experimental_select_user_tf_ops=None):
    # NOTE: each default is applied only when the argument is None, so a
    # caller-supplied empty set is preserved as-is (an `or`-style default
    # would incorrectly replace it).
    self.supported_ops = (
        {OpsSet.TFLITE_BUILTINS} if supported_ops is None else supported_ops)
    self.supported_types = (
        set() if supported_types is None else supported_types)
    self.experimental_select_user_tf_ops = (
        set() if experimental_select_user_tf_ops is None else
        experimental_select_user_tf_ops)
    # Custom-op registerers (names or callables) consumed by the calibrator.
    self._experimental_custom_op_registerers = []
class QuantizationMode(object):
  """QuantizationMode determines the quantization type from user options.

  Inspects the user-supplied `optimizations`, `target_spec` and
  `representative_dataset` (plus the graph itself) and exposes predicates
  telling the converter which quantization scheme to apply.
  """
  def __init__(self, optimizations, target_spec, representative_dataset,
               graph_def, disable_per_channel=False):
    # optimizations: iterable of `Optimize` enum values requested by the user.
    # target_spec: `TargetSpec` describing target-device capabilities.
    # representative_dataset: calibration data generator, or None.
    # graph_def: frozen GraphDef; scanned for training-time quantization ops.
    # disable_per_channel: if True, request per-tensor weight quantization.
    self._optimizations = optimizations
    self._target_spec = target_spec
    self._representative_dataset = representative_dataset
    self._graph_def = graph_def
    # Fail fast if an int8-only target was requested without the inputs
    # required to honor it (see _validate_int8_required).
    self._validate_int8_required()
    self._disable_per_channel = disable_per_channel
  # TODO(b/162537905): Refactor the following quantization functions -
  # re-organize and refactor for better readability.
  def post_training_int8_no_float(self):
    # Post-training full-integer (int8) quantization with no float fallback:
    # requires calibration data and an int8-only target.
    return (self._any_optimization_enabled() and
            self._is_int8_target_required() and
            not self._is_int16x8_target_required() and
            not self._is_allow_float() and
            self._representative_dataset is not None)
  def post_training_int8_allow_float(self):
    # Post-training int8 quantization where ops without an int8 kernel may
    # stay in float (target allows float and int8 is the smallest type).
    return (self._any_optimization_enabled() and
            not self._is_int16x8_target_required() and
            self._representative_dataset is not None and
            self._smallest_supported_type() == _dtypes.int8)
  def is_post_training_integer_quantize_8(self):
    # Any of the post-training int8 modes (with or without float fallback).
    return (self.post_training_int8_no_float() or
            self.post_training_int8_allow_float())
  def is_post_training_integer_quantize_16x8(self):
    # Any of the post-training 16x8 modes (int16 activations, int8 weights).
    return (self.post_training_int16x8_no_float() or
            self.post_training_int16x8_allow_float())
  def is_integer_quantize(self):
    # True for every integer-quantization path: post-training 8-bit,
    # post-training 16x8, or training-time (QAT) quantization.
    return (self.is_post_training_integer_quantize_8() or
            self.is_post_training_integer_quantize_16x8() or
            self.is_training_time_int8_allow_float())
  def is_training_time_int8_allow_float(self):
    # Quantization-aware-training path: the graph already contains
    # fake-quant / quantize-dequantize ops.
    return (self._any_optimization_enabled() and
            self.contains_training_quant_op())
  def post_training_int16x8_no_float(self):
    # 16x8 quantization with no float fallback; requires calibration data.
    return (self._any_optimization_enabled() and
            not self._is_int8_target_required() and
            self._is_int16x8_target_required() and
            not self._is_allow_float() and
            self._representative_dataset is not None)
  def post_training_int16x8_allow_float(self):
    # 16x8 quantization where unsupported ops may remain in float.
    return (self._any_optimization_enabled() and
            self._is_int16x8_target_required() and
            self._is_allow_float())
  def post_training_dynamic_range_int8(self):
    # Post-training dynamic range quantization is only enabled if post-training
    # int8 quantization and training time quantization was not done.
    return (self._any_optimization_enabled() and
            self._representative_dataset is None and
            not self.contains_training_quant_op() and
            self._smallest_supported_type() == _dtypes.int8)
  def post_training_fp16(self):
    # Weight quantization to float16 (smallest supported type is float16).
    return (self._any_optimization_enabled() and
            self._smallest_supported_type() == _dtypes.float16)
  def fp32_execution(self):
    """If none of the above are true."""
    return not (self.is_integer_quantize() or
                self.post_training_dynamic_range_int8() or
                self.post_training_fp16())
  def activations_type(self):
    # Activation dtype: int16 for the 16x8 scheme, otherwise int8.
    return _dtypes.int16 if self._is_int16x8_target_required() \
        else _dtypes.int8
  def converter_flags(self, inference_ty=None, inference_input_ty=None):
    """Flags to the converter."""
    if self.is_integer_quantize():
      return {
          "inference_type": inference_ty if inference_ty else \
            self.activations_type(),
          "inference_input_type": _dtypes.float32,
          "post_training_quantize": False,  # disable dynamic range quantization
          "quantize_to_float16": False  # disable float16 quantization
      }
    elif self.post_training_dynamic_range_int8():
      return {
          "inference_type": _dtypes.float32,
          "inference_input_type": _dtypes.float32,
          "post_training_quantize": True,  # enable dynamic range quantization
          "quantize_to_float16": False  # disable float16 quantization
      }
    elif self.post_training_fp16():
      return {
          "inference_type": _dtypes.float32,
          "inference_input_type": _dtypes.float32,
          "post_training_quantize": True,  # enabled as a prerequisite
          "quantize_to_float16": True  # enable float16 quantization
      }
    else:
      # Note this might still trigger (uint8) quantization to be compatible with
      # TOCO.
      return {
          "inference_type": inference_ty if inference_ty else _dtypes.float32,
          "inference_input_type": inference_input_ty,
          "post_training_quantize": False,  # disable dynamic range quantization
          "quantize_to_float16": False  # disable float16 quantization
      }
  def quantizer_flags(self, input_ty=None, output_ty=None):
    """Default flags to the TFMOT quantizer."""
    # Returns (calibrate_and_quantize, flags): flags is None when no
    # post-training calibration/quantization should run.
    inference_input_type = input_ty if input_ty else _dtypes.float32
    inference_output_type = output_ty if output_ty else _dtypes.float32
    if self.post_training_int8_no_float() \
        or self.post_training_int16x8_no_float():
      return True, {
          "inference_input_type": inference_input_type,
          "inference_output_type": inference_output_type,
          "activations_type": self.activations_type(),
          "allow_float": False,
          "disable_per_channel": self._disable_per_channel,
      }
    elif self.post_training_int8_allow_float() \
        or self.post_training_int16x8_allow_float():
      return True, {
          "inference_input_type": inference_input_type,
          "inference_output_type": inference_output_type,
          "activations_type": self.activations_type(),
          "allow_float": True,
          "disable_per_channel": self._disable_per_channel,
      }
    else:
      return False, None
  def flags_modify_model_io_type(self, input_ty=None, output_ty=None):
    """Flags for modifying the input and output type of a tflite model."""
    # Only integer-quantized models support changing the model I/O dtypes.
    if self.is_integer_quantize():
      return {
          "inference_input_type": input_ty if input_ty else _dtypes.float32,
          "inference_output_type": output_ty if output_ty else _dtypes.float32,
      }
    else:
      return None
  # Below are helpers for the above functions.
  def _validate_int8_required(self):
    """Int8 mode requires certain parameters to exist and be compatible."""
    if not self._is_int8_target_required():
      return
    if self._target_spec.supported_types and (self._smallest_supported_type() !=
                                              _dtypes.int8):
      raise ValueError("TFLITE_BUILTINS_INT8 requires smallest supported "
                       "type to be INT8.")
    if self._representative_dataset:
      # Normalize a raw generator into a RepresentativeDataset wrapper.
      if not isinstance(self._representative_dataset, RepresentativeDataset):
        self._representative_dataset = RepresentativeDataset(
            self._representative_dataset)
      if self._representative_dataset.input_gen is None:
        raise ValueError(
            "Provide an input generator for representative_dataset")
    else:
      # TODO(b/162537905): Relax this check for QAT.
      raise ValueError("representative_dataset is required when specifying "
                       "TFLITE_BUILTINS_INT8 or INT8 supported types.")
  def _is_int8_target_required(self):
    # Int8-only target: either the TFLITE_BUILTINS_INT8 op set was requested
    # or the only supported type is int8.
    return (OpsSet.TFLITE_BUILTINS_INT8 in set(
        self._target_spec.supported_ops)) or (set(
            self._target_spec.supported_types) == set([_dtypes.int8]))
  def _is_int16x8_target_required(self):
    # 16x8 target: int16 activations with int8 weights op set requested.
    return (OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
            in set(self._target_spec.supported_ops))
  def _is_allow_float(self):
    # Float fallback is allowed when the builtin float op set or the
    # select-TF-ops (flex) set is part of the target.
    return (OpsSet.TFLITE_BUILTINS in set(
        self._target_spec.supported_ops)) or (OpsSet.SELECT_TF_OPS in set(
            self._target_spec.supported_ops))
  def _any_optimization_enabled(self):
    # True if any of the (equivalent) size/latency/default optimizations is on.
    return bool(
        set(self._optimizations).intersection([
            Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE,
            Optimize.DEFAULT
        ]))
  def _smallest_supported_type(self):
    # Smallest dtype (by byte size) from the target spec, defaulting to int8.
    if self._target_spec.supported_types:
      return min(self._target_spec.supported_types, key=lambda x: x.size)
    else:
      # The default smallest supported type is INT8.
      return _dtypes.int8
  def contains_training_quant_op(self):
    """Checks if the graph contains any training-time quantization ops."""
    training_quant_ops = frozenset({
        "FakeQuantWithMinMaxVars", "FakeQuantWithMinMaxVarsPerChannel",
        "FakeQuantWithMinMaxArgs", "FakeQuantWithMinMaxArgsPerChannel",
        "QuantizeAndDequantizeV2", "QuantizeAndDequantizeV3"
    })
    # Scan both the top-level nodes and every function-library node.
    for node_def in self._graph_def.node:
      if node_def.op in training_quant_ops:
        return True
    for function in self._graph_def.library.function:
      for node_def in function.node_def:
        if node_def.op in training_quant_ops:
          return True
    return False
# The metrics are unregistered if their variables get garbage-collected. So use
# a global variable to keep them alive till program exits. This single
# instance is shared by every converter created in this process (see
# TFLiteConverterBase.__init__).
_global_metrics = metrics.TFLiteMetrics()
class TFLiteConverterBase(object):
  """Converter subclass to share functionality between V1 and V2 converters."""
  def __init__(self):
    # Public, user-facing knobs.
    self.optimizations = set()
    self.representative_dataset = None
    self.target_spec = TargetSpec()
    self.allow_custom_ops = False
    self.experimental_new_converter = True
    self.experimental_new_quantizer = True
    # Deprecated private spelling; must stay None (see
    # _validate_experimental_new_quantizer_flag).
    self._experimental_new_quantizer = None
    self._experimental_calibrate_only = False
    self._experimental_sparsify_model = False
    self._experimental_disable_per_channel = False
    self._debug_info = None  # contains the stack traces of all the original
    # nodes in the `GraphDef` to the converter.
    self.saved_model_dir = None
    self._saved_model_tags = None
    self._saved_model_version = 0
    self._saved_model_exported_names = []
    # Variable for converter metrics.
    self._tflite_metrics = _global_metrics
    self._experimental_disable_batchmatmul_unfold = False
    self._experimental_lower_tensor_list_ops = True
  def _grappler_config(self, optimizers=None):
    """Creates a tf.compat.v1.ConfigProto for configuring Grappler.

    Args:
      optimizers: List of strings that represents the list of optimizers.

    Returns:
      tf.ConfigProto.
    """
    if not optimizers:
      optimizers = []
    # MLIR converter will take care of constant folding instead of grappler.
    if not self.experimental_new_converter:
      optimizers.append("constfold")
    is_only_flex_enabled = (
        set([OpsSet.SELECT_TF_OPS]) == set(self.target_spec.supported_ops))
    if is_only_flex_enabled:
      # The layout optimizer turns NHCW to NCHW. This provides performance
      # optimizations when Flex mode is enabled. However, this is not compatible
      # with builtin ops.
      optimizers.append("layout")
    return _get_grappler_config(optimizers)
  def _calibrate_quantize_model(self, result, inference_input_type,
                                inference_output_type, activations_type,
                                allow_float, disable_per_channel):
    """Calibrate and quantize the model."""
    # Split user-supplied custom-op registerers into name-based and
    # callable-based groups, as the Calibrator takes them separately.
    # pylint: disable=protected-access
    custom_op_registerers_by_name = [
        x for x in self.target_spec._experimental_custom_op_registerers
        if isinstance(x, str)
    ]
    custom_op_registerers_by_func = [
        x for x in self.target_spec._experimental_custom_op_registerers
        if not isinstance(x, str)
    ]
    # pylint: enable=protected-access
    if not isinstance(self.representative_dataset, RepresentativeDataset):
      self.representative_dataset = RepresentativeDataset(
          self.representative_dataset)
    # Add intermediate tensors to the model if needed.
    result = _calibrator.add_intermediate_tensors(result)
    calibrate_quantize = _calibrator.Calibrator(result,
                                                custom_op_registerers_by_name,
                                                custom_op_registerers_by_func)
    if self._experimental_calibrate_only or self.experimental_new_quantizer:
      calibrated = calibrate_quantize.calibrate(
          self.representative_dataset.input_gen)
    if self._experimental_calibrate_only:
      # Return the calibrated-but-not-yet-quantized model.
      return calibrated
    elif self.experimental_new_quantizer and (
        activations_type != _dtypes.int16):
      # TODO(b/175659372): remove the activations_type restriction and enable
      # it for all the activation types.
      return _mlir_quantize(calibrated, disable_per_channel,
                            input_data_type=inference_input_type,
                            output_data_type=inference_output_type)
    else:
      # Old quantizer path: calibrate and quantize in one step.
      return calibrate_quantize.calibrate_and_quantize(
          self.representative_dataset.input_gen, inference_input_type,
          inference_output_type, allow_float, activations_type,
          disable_per_channel=disable_per_channel)
  def _is_unknown_shapes_allowed(self):
    # Unknown dimensions are only allowed with the new converter.
    return self.experimental_new_converter
  def _get_base_converter_args(self):
    """Returns the base converter args.

    Returns:
      {key str: val}
    """
    args = {
        "input_format": constants.TENSORFLOW_GRAPHDEF,
        "allow_custom_ops": self.allow_custom_ops,
        "debug_info": self._debug_info,
        "target_ops": self.target_spec.supported_ops,
        "enable_mlir_converter": self.experimental_new_converter,
        "select_user_tf_ops": self.target_spec.experimental_select_user_tf_ops,
        "unfold_batchmatmul": not self._experimental_disable_batchmatmul_unfold,
        "lower_tensor_list_ops": self._experimental_lower_tensor_list_ops,
    }
    # SavedModel-specific args are only included when the saved model import
    # path is active (see _parse_saved_model_args).
    if self.saved_model_dir:
      args.update({
          "saved_model_dir": self.saved_model_dir,
          "saved_model_version": self._saved_model_version,
          "saved_model_tags": self._saved_model_tags,
          "saved_model_exported_names": self._saved_model_exported_names,
      })
    return args
  def _contains_function_with_implements_attr(self, saved_model_proto):
    # True if any function in the first meta graph carries an "_implements" /
    # "api_implements" attribute (used e.g. by fused Keras/RNN ops).
    meta_graph = saved_model_proto.meta_graphs[0]
    for function in meta_graph.graph_def.library.function:
      if function.attr.get("_implements", None) or function.attr.get(
          "api_implements", None):
        return True
    return False
  def _parse_saved_model_args(self, always_enable_saved_model_import=False):
    """Parses SavedModel arguments from the given Keras/RNN SavedModel.

    Args:
      always_enable_saved_model_import: Bool. When the value is true, it enables
        MLIR saved model import path regardless of checking the conditions.

    Side effects: may reset `self.saved_model_dir` to None, which signals the
    caller to fall back to the frozen-graph conversion path.
    """
    if not self.experimental_new_converter:
      self.saved_model_dir = None
      return
    if self.saved_model_dir:
      try:
        saved_model_proto, _ = (
            _parse_saved_model_with_debug_info(self.saved_model_dir))
      except OSError:
        # If it fails to read the given saved model, it will fall back to the
        # frozen graph def path.
        self.saved_model_dir = None
        return
      if (not always_enable_saved_model_import and
          not self._contains_function_with_implements_attr(saved_model_proto)):
        self.saved_model_dir = None
        return
      if not self._saved_model_exported_names:
        self._saved_model_exported_names = []
      self._saved_model_version = saved_model_proto.saved_model_schema_version
      if self._saved_model_version == 0:
        self.saved_model_dir = None
        logging.warning("SavedModel schema version is zero.")
        return
      if self._saved_model_version not in [1, 2]:
        raise ValueError("SavedModel file format({0}) is not supported".format(
            self._saved_model_version))
  def _sparsify_model(self):
    # True when the user requested the experimental sparsity optimization.
    return Optimize.EXPERIMENTAL_SPARSITY in self.optimizations
  def _validate_experimental_new_quantizer_flag(self):
    # Guard against the removed private flag spelling.
    if self._experimental_new_quantizer is not None:
      raise ValueError("Please use 'experimental_new_quantizer' instead.")
  def _increase_conversion_attempt_metric(self):
    self._tflite_metrics.increase_counter_converter_attempt()
  def _increase_conversion_success_metric(self, result):
    # Only count a success when conversion produced a non-empty result.
    if result:
      self._tflite_metrics.increase_counter_converter_success()
  def _save_conversion_params_metric(self,
                                     converter_params,
                                     graph_def=None,
                                     inference_type=None,
                                     inference_input_type=None):
    """Set conversion parameter metrics."""
    converter_kwargs = converter_params.copy()
    converter_kwargs.update(self._get_base_converter_args())
    # quantization-related parameters.
    try:
      quant_mode = QuantizationMode(self.optimizations, self.target_spec,
                                    self.representative_dataset, graph_def)
      calibrate_and_quantize, flags = quant_mode.quantizer_flags(
          inference_type, inference_input_type)
      converter_kwargs.update({
          "calibrate_and_quantize": calibrate_and_quantize,
      })
      if calibrate_and_quantize:
        converter_kwargs.update(flags)
      converter_kwargs.update(
          quant_mode.converter_flags(inference_type, inference_input_type))
    except Exception:  # pylint: disable=broad-except
      # Still updates other params.
      pass
    # Optimization parameters.
    optimization_default = set(self.optimizations).intersection([
        Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE,
        Optimize.DEFAULT
    ])
    converter_kwargs.update({
        "optimization_sparsify_model": self._sparsify_model(),
        "optimization_default": bool(optimization_default),
    })
    def format_element(elem):
      # Enums are recorded by value; everything else via pprint.
      if isinstance(elem, enum.Enum):
        return str(elem.value)
      return pprint.pformat(elem)
    def format_param(param):
      # Collections are recorded as a sorted comma-joined string so that
      # the metric value is order-independent and deterministic.
      if isinstance(param, (list, tuple, set)):
        if not param:
          return "None"  # Return None if empty.
        string_list = [format_element(x) for x in param]
        return ",".join(sorted(string_list))
      return format_element(param)
    for key, value in converter_kwargs.items():
      self._tflite_metrics.set_converter_param(key, format_param(value))
class TFLiteConverterBaseV2(TFLiteConverterBase):
  """Converter subclass to share functionality between V2 converters."""
  def __init__(self):
    """Constructor for TFLiteConverter."""
    super(TFLiteConverterBaseV2, self).__init__()
    # V2 converters additionally expose the model I/O dtypes.
    self.inference_input_type = _dtypes.float32
    self.inference_output_type = _dtypes.float32
  def _validate_inference_input_output_types(self, quant_mode):
    """Validate inference_input_type and inference_output_type flags."""
    default_types = [_dtypes.float32]
    # We support integer input/output for integer quantized models only.
    if quant_mode.is_integer_quantize():
      if quant_mode.is_post_training_integer_quantize_16x8():
        all_types = default_types + [_dtypes.int16]
      else:
        all_types = default_types + [_dtypes.int8, _dtypes.uint8]
      if self.inference_input_type not in all_types or \
          self.inference_output_type not in all_types:
        all_types_names = ["tf." + t.name for t in all_types]
        raise ValueError("The inference_input_type and inference_output_type "
                         "must be in {}.".format(all_types_names))
    elif self.inference_input_type not in default_types or \
        self.inference_output_type not in default_types:
      raise ValueError("The inference_input_type and inference_output_type "
                       "must be tf.float32.")
  def _save_conversion_params_metric(self,
                                     converter_params,
                                     graph_def=None,
                                     inference_type=None,
                                     inference_input_type=None):
    # Tag the metric with the API version before delegating to the base class.
    converter_kwargs = converter_params.copy()
    converter_kwargs.update({
        "api_version": 2,
    })
    super(TFLiteConverterBaseV2,
          self)._save_conversion_params_metric(converter_kwargs, graph_def,
                                               inference_type,
                                               inference_input_type)
  def convert(self, graph_def, input_tensors, output_tensors):
    """Converts a TensorFlow GraphDef based on instance variables.

    Args:
      graph_def: Frozen TensorFlow GraphDef.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete functions is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # Update conversion params with graph_def.
    self._save_conversion_params_metric({}, graph_def)
    quant_mode = QuantizationMode(self.optimizations, self.target_spec,
                                  self.representative_dataset, graph_def,
                                  self._experimental_disable_per_channel)
    self._validate_inference_input_output_types(quant_mode)
    self._validate_experimental_new_quantizer_flag()
    if not self._is_unknown_shapes_allowed():
      # Checks dimensions in input tensor.
      for tensor in input_tensors:
        # Note that shape_list might be empty for scalar shapes.
        shape_list = tensor.shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          # Set the batch size to 1 if undefined.
          shape = tensor.shape.as_list()
          shape[0] = 1
          tensor.set_shape(shape)
    # NOTE(review): `_trackable_obj` and `_funcs` are set by subclasses, not
    # by this base class — assumed present by the time convert() runs.
    if self._trackable_obj is None:
      self._debug_info = _get_debug_info(
          _build_debug_info_func(self._funcs[0].graph), graph_def)
    else:
      self._debug_info = _get_debug_info(
          _convert_debug_info_func(self._trackable_obj.graph_debug_info),
          graph_def)
    converter_kwargs = self._get_base_converter_args()
    converter_kwargs.update(quant_mode.converter_flags())
    if not self.experimental_new_converter:
      logging.warning(
          "Please consider switching to the new converter by setting "
          "experimental_new_converter=True. "
          "The old converter (TOCO) is deprecated.")
    else:
      logging.info("Using new converter: If you encounter a problem "
                   "please file a bug. You can opt-out "
                   "by setting experimental_new_converter=False")
    # Converts model.
    result = _toco_convert_impl(
        input_data=graph_def,
        input_tensors=input_tensors,
        output_tensors=output_tensors,
        **converter_kwargs)
    # Post-conversion: calibrate/quantize when the quantization mode asks
    # for it; the new quantizer honors the user-specified I/O dtypes.
    if self.experimental_new_quantizer:
      calibrate_and_quantize, flags = quant_mode.quantizer_flags(
          self.inference_input_type, self.inference_output_type)
    else:
      calibrate_and_quantize, flags = quant_mode.quantizer_flags()
    if calibrate_and_quantize:
      result = self._calibrate_quantize_model(result, **flags)
    # Optionally rewrite the model's input/output tensors to the requested
    # integer dtypes (integer-quantized models only).
    flags_modify_model_io_type = quant_mode.flags_modify_model_io_type(
        self.inference_input_type, self.inference_output_type)
    if flags_modify_model_io_type:
      result = _modify_model_io_type(result, **flags_modify_model_io_type)
    if self._sparsify_model():
      result = _mlir_sparsify(result)
    return result
class TFLiteSavedModelConverterV2(TFLiteConverterBaseV2):
  """Converts the given SavedModel into TensorFlow Lite model.

  Attributes:
    saved_model_dir: Directory of the SavedModel.
  """

  def __init__(self,
               saved_model_dir,
               saved_model_tags=None,
               saved_model_exported_names=None,
               trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      saved_model_dir: Directory of the SavedModel.
      saved_model_tags: Set of tags identifying the MetaGraphDef within the
        SavedModel to analyze. All tags in the tag set must be present. (default
        {tf.saved_model.SERVING}).
      saved_model_exported_names: Names to be exported when the saved model
        import path is on.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteSavedModelConverterV2, self).__init__()
    self.saved_model_dir = saved_model_dir
    self._saved_model_tags = saved_model_tags
    self._saved_model_exported_names = saved_model_exported_names
    self._trackable_obj = trackable_obj
    # May clear self.saved_model_dir when the SavedModel import path cannot
    # be used; convert() checks that attribute to pick the conversion path.
    self._parse_saved_model_args(always_enable_saved_model_import=True)
    self._enable_tflite_resource_variables = False

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete functions is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    converter_kwargs = {
        "enable_tflite_resource_variables":
            self._enable_tflite_resource_variables
    }
    self._increase_conversion_attempt_metric()
    self._save_conversion_params_metric(converter_kwargs)
    # Load the MetaGraphDef so the signature inputs/outputs (and graph_def)
    # are available regardless of which conversion path is taken below.
    graph = _ops.Graph()
    saved_model = _loader_impl.SavedModelLoader(self.saved_model_dir)
    saved_model.load_graph(graph, tags=self._saved_model_tags)
    meta_graph = saved_model.get_meta_graph_def_from_tags(
        self._saved_model_tags)
    # If we can't use saved model importer, then fallback
    # to frozen graph conversion path.
    if self.saved_model_dir is None or not self.experimental_new_converter:
      signature_def = meta_graph.signature_def[
          _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
      input_tensors = [
          graph.get_tensor_by_name(signature_def.inputs[key].name)
          for key in signature_def.inputs
      ]
      output_tensors = [
          graph.get_tensor_by_name(signature_def.outputs[key].name)
          for key in signature_def.outputs
      ]
      result = _freeze_saved_model(
          self.saved_model_dir, None, None, None, self._saved_model_tags,
          _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

      graph_def = result[0]
      # We make sure to clear the saved_model_dir as there is some
      # legacy code down in the caller that checks this.
      # TODO(b/162537905): Clean these indirect dependencies.
      self.saved_model_dir = None
      conversion_result = super(TFLiteSavedModelConverterV2,
                                self).convert(graph_def, input_tensors,
                                              output_tensors)
      self._increase_conversion_success_metric(conversion_result)
      return conversion_result

    # SavedModel-import path: build debug info either from the traced
    # function graph or from the trackable object's stored debug info.
    if self._trackable_obj is None:
      self._debug_info = _get_debug_info(
          _build_debug_info_func(self._funcs[0].graph), meta_graph.graph_def)
    else:
      self._debug_info = _get_debug_info(
          _convert_debug_info_func(self._trackable_obj.graph_debug_info),
          meta_graph.graph_def)

    # Update conversion params with graph_def.
    self._save_conversion_params_metric(converter_kwargs, meta_graph.graph_def)
    # Get quantization options and do some sanity checks.
    quant_mode = QuantizationMode(self.optimizations, self.target_spec,
                                  self.representative_dataset,
                                  meta_graph.graph_def,
                                  self._experimental_disable_per_channel)
    self._validate_inference_input_output_types(quant_mode)
    converter_kwargs.update(self._get_base_converter_args())
    converter_kwargs.update(quant_mode.converter_flags())

    result = _convert_saved_model(**converter_kwargs)

    # Post-conversion: optional calibration/quantization, I/O type rewrite,
    # and sparsification, in that order.
    if self.experimental_new_quantizer:
      calibrate_and_quantize, flags = quant_mode.quantizer_flags(
          self.inference_input_type, self.inference_output_type)
    else:
      calibrate_and_quantize, flags = quant_mode.quantizer_flags()
    if calibrate_and_quantize:
      result = self._calibrate_quantize_model(result, **flags)

    flags_modify_model_io_type = quant_mode.flags_modify_model_io_type(
        self.inference_input_type, self.inference_output_type)
    if flags_modify_model_io_type:
      result = _modify_model_io_type(result, **flags_modify_model_io_type)

    if self._sparsify_model():
      result = _mlir_sparsify(result)

    self._increase_conversion_success_metric(result)
    return result
class TFLiteKerasModelConverterV2(TFLiteConverterBaseV2):
  """Converts the given Keras model into TensorFlow Lite model."""

  def __init__(self, keras_model, trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      keras_model: tf.Keras.Model.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteKerasModelConverterV2, self).__init__()
    self._keras_model = keras_model
    self._trackable_obj = trackable_obj

  def _convert_as_saved_model(self):
    """Converts a Keras model as a saved model.

    Returns:
      The converted data in serialized format, or None when the model could
      not be saved as a SavedModel (caller falls back to the Keras pipeline).
    """
    temp_dir = tempfile.mkdtemp()
    try:
      try:
        self._keras_model.save(temp_dir, save_format="tf")
      except Exception:  # pylint: disable=broad-except
        # When storing the given keras model to a saved model is failed, let's
        # use original keras model conversion pipeline.
        return None
      self.saved_model_dir = temp_dir
      self._saved_model_tags = set([_tag_constants.SERVING])
      self._saved_model_exported_names = [
          _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
      ]
      # May clear self.saved_model_dir when the SavedModel import path
      # cannot be used; in that case this method returns None implicitly.
      self._parse_saved_model_args()
      if self.saved_model_dir:
        graph = _ops.Graph()
        saved_model = _loader_impl.SavedModelLoader(self.saved_model_dir)
        saved_model.load_graph(graph, tags=self._saved_model_tags)
        meta_graph = saved_model.get_meta_graph_def_from_tags(
            self._saved_model_tags)
        signature_def = meta_graph.signature_def[
            _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        input_tensors = [
            graph.get_tensor_by_name(signature_def.inputs[key].name)
            for key in signature_def.inputs
        ]
        output_tensors = [
            graph.get_tensor_by_name(signature_def.outputs[key].name)
            for key in signature_def.outputs
        ]
        self._trackable_obj = _load(self.saved_model_dir,
                                    self._saved_model_tags)
        return super(TFLiteKerasModelConverterV2,
                     self).convert(meta_graph.graph_def, input_tensors,
                                   output_tensors)
    finally:
      # Always clean up the temporary SavedModel directory.
      shutil.rmtree(temp_dir, True)

  def convert(self):
    """Converts a keras model based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    self._increase_conversion_attempt_metric()
    self._save_conversion_params_metric({})
    # Preferred path: round-trip through a SavedModel; fall back to tracing
    # the Keras model call directly when that fails.
    saved_model_convert_result = self._convert_as_saved_model()
    if saved_model_convert_result:
      self._increase_conversion_success_metric(saved_model_convert_result)
      return saved_model_convert_result

    input_signature = None
    # If the model's call is not a `tf.function`, then we need to first get its
    # input signature from `model_input_signature` method. We can't directly
    # call `trace_model_call` because otherwise the batch dimension is set
    # to None.
    # Once we have better support for dynamic shapes, we can remove this.
    if not isinstance(self._keras_model.call, _def_function.Function):
      # Pass `keep_original_batch_size=True` will ensure that we get an input
      # signature including the batch dimension specified by the user.
      # TODO(b/169898786): Use the Keras public API when TFLite moves out of TF
      input_signature = _model_input_signature(
          self._keras_model, keep_original_batch_size=True)

    # TODO(b/169898786): Use the Keras public API when TFLite moves out of TF
    func = _trace_model_call(self._keras_model, input_signature)
    concrete_func = func.get_concrete_function()
    self._funcs = [concrete_func]

    frozen_func, graph_def = (
        _convert_to_constants.convert_variables_to_constants_v2_as_graph(
            self._funcs[0], lower_control_flow=False))

    # Resource-typed inputs (variable handles) are not model inputs.
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    # Run a Grappler pass.
    grappler_config = self._grappler_config()
    # Skip running grappler when there are no optimizers to run. If not,
    # grappler will run with the default optimizer set and it will lead to
    # causing an unexpected behavior.
    if grappler_config.graph_options.rewrite_options.optimizers:
      graph_def = _run_graph_optimizations(
          graph_def,
          input_tensors,
          output_tensors,
          config=grappler_config,
          graph=frozen_func.graph)

    result = super(TFLiteKerasModelConverterV2,
                   self).convert(graph_def, input_tensors, output_tensors)
    self._increase_conversion_success_metric(result)
    return result
class TFLiteFrozenGraphConverterV2(TFLiteConverterBaseV2):
  """Converts the given frozen graph into TensorFlow Lite model."""

  def __init__(self, funcs, trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteFrozenGraphConverterV2, self).__init__()
    self._funcs = funcs
    self._trackable_obj = trackable_obj

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete functions is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # TODO(b/130297984): Add support for converting multiple function.
    if len(self._funcs) == 0:  # pylint: disable=g-explicit-length-test
      raise ValueError("No ConcreteFunction is specified.")

    if len(self._funcs) > 1:
      raise ValueError("This converter can only convert a single "
                       "ConcreteFunction. Converting multiple functions is "
                       "under development.")

    self._increase_conversion_attempt_metric()
    self._save_conversion_params_metric({})
    # Freeze variables into constants so the graph is self-contained.
    frozen_func, graph_def = (
        _convert_to_constants.convert_variables_to_constants_v2_as_graph(
            self._funcs[0], lower_control_flow=False))

    # Resource-typed inputs (variable handles) are not model inputs.
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    # Run a Grappler pass.
    grappler_config = self._grappler_config()
    # Skip running grappler when there are no optimizers to run. If not,
    # grappler will run with the default optimizer set and it will lead to
    # causing an unexpected behavior.
    if grappler_config.graph_options.rewrite_options.optimizers:
      graph_def = _run_graph_optimizations(
          graph_def,
          input_tensors,
          output_tensors,
          config=grappler_config,
          graph=frozen_func.graph)

    result = super(TFLiteFrozenGraphConverterV2,
                   self).convert(graph_def, input_tensors, output_tensors)
    self._increase_conversion_success_metric(result)
    return result
@_tf_export("lite.TFLiteConverter", v1=[])
class TFLiteConverterV2(TFLiteFrozenGraphConverterV2):
  """Converts a TensorFlow model into TensorFlow Lite model.

  Attributes:
    optimizations: Experimental flag, subject to change. Set of optimizations
      to apply. e.g {tf.lite.Optimize.DEFAULT}. (default None, must be None or a
      set of values of type `tf.lite.Optimize`)
    representative_dataset: A generator function used for integer quantization
      where each generated sample has the same order, type and shape as the
      inputs to the model. Usually, this is a small subset of a few hundred
      samples randomly chosen, in no particular order, from the training or
      evaluation dataset. This is an optional attribute, but required for full
      integer quantization, i.e, if `tf.int8` is the only supported type in
      `target_spec.supported_types`. Refer to `tf.lite.RepresentativeDataset`.
      (default None)
    target_spec: Experimental flag, subject to change. Specifications of target
      device, including supported ops set, supported types and a set of user's
      defined TensorFlow operators required in the TensorFlow Lite runtime.
      Refer to `tf.lite.TargetSpec`.
    inference_input_type: Data type of the input layer. Note that integer types
      (tf.int8 and tf.uint8) are currently only supported for post training
      integer quantization and quantization aware training. (default tf.float32,
      must be in {tf.float32, tf.int8, tf.uint8})
    inference_output_type: Data type of the output layer. Note that integer
      types (tf.int8 and tf.uint8) are currently only supported for post
      training integer quantization and quantization aware training. (default
      tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When False, any unknown operation is an error. When True, custom ops are
      created for any op that is unknown. The developer needs to provide these
      to the TensorFlow Lite runtime with a custom resolver. (default False)
    experimental_new_converter: Experimental flag, subject to change. Enables
      MLIR-based conversion instead of TOCO conversion. (default True)
    experimental_new_quantizer: Experimental flag, subject to change. Enables
      MLIR-based quantization conversion instead of Flatbuffer-based conversion.
      (default True)

  Example usage:

    ```python
    # Converting a SavedModel to a TensorFlow Lite model.
    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()

    # Converting a tf.Keras model to a TensorFlow Lite model.
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()

    # Converting ConcreteFunctions to a TensorFlow Lite model.
    converter = tf.lite.TFLiteConverter.from_concrete_functions([func])
    tflite_model = converter.convert()
    ```
  """

  # pylint: disable=useless-super-delegation
  def __init__(self, funcs, trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)

  @classmethod
  def from_concrete_functions(cls, funcs):
    """Creates a TFLiteConverter object from ConcreteFunctions.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements. Currently converter can only convert a single
        ConcreteFunction. Converting multiple functions is under development.

    Returns:
      TFLiteConverter object.

    Raises:
      Invalid input type.
    """
    for func in funcs:
      if not isinstance(func, _function.ConcreteFunction):
        message = "This function takes in a list of ConcreteFunction."
        if isinstance(func, _def_function.Function):
          message += (" To get the ConcreteFunction from a Function,"
                      " call get_concrete_function.")
        raise ValueError(message)
    return cls(funcs)

  @classmethod
  def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
    """Creates a TFLiteConverter object from a SavedModel directory.

    Args:
      saved_model_dir: SavedModel directory to convert.
      signature_keys: List of keys identifying SignatureDef containing inputs
        and outputs. Elements should not be duplicated. By default the
        `signatures` attribute of the MetaGraphdef is used. (default
        saved_model.signatures)
      tags: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default
        {tf.saved_model.SERVING} or {'serve'})

    Returns:
      TFLiteConverter object.

    Raises:
      Invalid signature keys.
    """
    # When run without eager enabled, this will return the legacy
    # TFLiteConverter.
    if not context.executing_eagerly():
      signature_key = None
      if signature_keys:
        if len(signature_keys) != 1:
          raise ValueError("Only support a single signature key.")
        else:
          signature_key = signature_keys[0]
      logging.warning("Invoking the TF1 implementation of TFLiteConverter "
                      "because eager is disabled. Consider enabling eager.")
      return TFLiteConverter.from_saved_model(
          saved_model_dir, signature_key=signature_key, tag_set=tags)

    # Ensures any graphs created in Eager mode are able to run. This is required
    # in order to create a tf.estimator.Exporter that exports a TFLite model.
    if tags is None:
      tags = set([_tag_constants.SERVING])

    with context.eager_mode():
      saved_model = _load(saved_model_dir, tags)
    if not signature_keys:
      signature_keys = saved_model.signatures

    if not signature_keys:
      raise ValueError("Only support at least one signature key.")

    funcs = []
    for key in signature_keys:
      if key not in saved_model.signatures:
        raise ValueError("Invalid signature key '{}' found. Valid keys are "
                         "'{}'.".format(key, ",".join(saved_model.signatures)))
      funcs.append(saved_model.signatures[key])

    # Prefer the SavedModel converter when its import path is usable
    # (its constructor clears saved_model_dir when it is not).
    saved_model_converter = TFLiteSavedModelConverterV2(saved_model_dir, tags,
                                                        signature_keys,
                                                        saved_model)
    if saved_model_converter.saved_model_dir:
      return saved_model_converter

    return cls(funcs, saved_model)

  @classmethod
  def from_keras_model(cls, model):
    """Creates a TFLiteConverter object from a Keras model.

    Args:
      model: tf.Keras.Model

    Returns:
      TFLiteConverter object.
    """
    return TFLiteKerasModelConverterV2(model)

  # pylint: disable=useless-super-delegation
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete functions is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    return super(TFLiteConverterV2, self).convert()
class TFLiteConverterBaseV1(TFLiteConverterBase):
  """Converter subclass to share functionality between V1 converters."""

  def __init__(self, experimental_debug_info_func):
    """Constructor for TFLiteConverter.

    Args:
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.
    """
    super(TFLiteConverterBaseV1, self).__init__()
    self.inference_type = _dtypes.float32
    self.inference_input_type = None
    self.inference_output_type = None
    self.output_format = constants.TFLITE
    self.quantized_input_stats = {}
    self.default_ranges_stats = None
    self.drop_control_dependency = True
    self.reorder_across_fake_quant = False
    self.change_concat_input_ranges = False
    self.dump_graphviz_dir = None
    self.dump_graphviz_video = False
    self.conversion_summary_dir = None
    self._debug_info_func = experimental_debug_info_func

  def __setattr__(self, name, value):
    # Deprecation shim: redirect writes to `post_training_quantize` and
    # `target_ops` to their replacement attributes with a warning.
    if name == "post_training_quantize":
      warnings.warn("Property %s is deprecated, "
                    "please use optimizations=[Optimize.DEFAULT]"
                    " instead." % name)
      if value:
        self.optimizations = [Optimize.DEFAULT]
      else:
        self.optimizations = []
      return
    if name == "target_ops":
      warnings.warn("Property %s is deprecated, please use "
                    "target_spec.supported_ops instead." % name)
      self.target_spec.supported_ops = value
      return
    object.__setattr__(self, name, value)

  def __getattribute__(self, name):
    # Deprecation shim: serve reads of `post_training_quantize` and
    # `target_ops` from their replacement attributes with a warning.
    if name == "post_training_quantize":
      warnings.warn("Property %s is deprecated, "
                    "please use optimizations=[Optimize.DEFAULT]"
                    " instead." % name)
      return Optimize.DEFAULT in set(self.optimizations)
    if name == "target_ops":
      warnings.warn("Property %s is deprecated, please use "
                    "target_spec.supported_ops instead." % name)
      return self.target_spec.supported_ops
    return object.__getattribute__(self, name)

  def _validate_quantized_input_stats(self, converter_kwargs, calibrate):
    """Ensure the `quantized_input_stats` flag is provided if required."""

    quantized_types = frozenset({_dtypes.int8, _dtypes.uint8})

    # Stats are only required for quantized inference types when no
    # calibration data is available to derive them.
    requires_quantized_input_stats = (
        (converter_kwargs["inference_type"] in quantized_types or
         converter_kwargs["inference_input_type"] in quantized_types) and
        not calibrate)

    if (requires_quantized_input_stats and
        not converter_kwargs["quantized_input_stats"]):
      raise ValueError(
          "The `quantized_input_stats` flag must be defined when either "
          "`inference_type` flag or `inference_input_type` flag is set to "
          "tf.int8 or tf.uint8. Currently, `inference_type={}` and "
          "`inference_input_type={}`.".format(
              _get_tf_type_name(converter_kwargs["inference_type"]),
              _get_tf_type_name(converter_kwargs["inference_input_type"])))

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    quant_mode = QuantizationMode(self.optimizations, self.target_spec,
                                  self.representative_dataset, self._graph_def,
                                  self._experimental_disable_per_channel)

    if (not self._is_unknown_shapes_allowed() and self._has_valid_tensors()):
      # Checks dimensions in input tensor.
      for tensor in self._input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError("Provide an input shape for input array "
                           "'{0}'.".format(_get_tensor_name(tensor)))
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list))
        elif shape_list and shape_list[0] is None:
          # An unknown batch dimension is allowed and defaulted to 1.
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if self.quantized_input_stats:
      quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in self.quantized_input_stats:
          quantized_stats.append(self.quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError("Quantization input stats are not available for input "
                         "tensors '{0}'.".format(",".join(invalid_stats)))
    else:
      quantized_stats = None

    optimized_graph = self._graph_def
    if not self.saved_model_dir:
      # Disable grappler constant folding if there are training quant ops.
      if not quant_mode.contains_training_quant_op():
        try:
          # TODO(b/150163103): Merge `disabling lower using switch merge' calls.
          # Grappler will also try to lower while loop into switch merge
          # representation which is undesired for Ophints, so we simply remove
          # those attributes to prevent Grappler from doing so.
          graph_def = _convert_to_constants.disable_lower_using_switch_merge(
              optimized_graph)
          # Run function inlining optimization to ensure any models generated
          # through the from_frozen_graph path have been inlined.
          optimized_graph = _run_graph_optimizations(
              graph_def,
              self._input_tensors,
              self._output_tensors,
              config=self._grappler_config(["function"]))
        except Exception:  # pylint: disable=broad-except
          # Grappler is best-effort here; fall back to the original graph.
          optimized_graph = self._graph_def

    self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)

    converter_kwargs = self._get_base_converter_args()
    converter_kwargs.update(
        quant_mode.converter_flags(self.inference_type,
                                   self.inference_input_type))
    converter_kwargs.update({
        "output_format": self.output_format,
        "quantized_input_stats": quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video,
        "conversion_summary_dir": self.conversion_summary_dir,
    })

    if not self.experimental_new_converter:
      logging.warning(
          "Please consider switching to the new converter by setting "
          "experimental_new_converter=True. "
          "The old converter (TOCO) is deprecated.")
    else:
      logging.info("Using experimental converter: If you encountered a problem "
                   "please file a bug. You can opt-out "
                   "by setting experimental_new_converter=False")

    if not self.experimental_new_converter:
      calibrate_quantize, flags = quant_mode.quantizer_flags(
          self.inference_input_type, self.inference_output_type)
    else:
      calibrate_quantize, flags = quant_mode.quantizer_flags()

    self._validate_quantized_input_stats(converter_kwargs, calibrate_quantize)
    self._validate_experimental_new_quantizer_flag()

    # Converts model.
    if self._has_valid_tensors():
      result = _toco_convert_impl(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs)
    else:
      # No tensors: the graph was described via input_arrays_with_shape.
      result = _toco_convert_graph_def(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          **converter_kwargs)

    if calibrate_quantize:
      result = self._calibrate_quantize_model(result, **flags)

    if self.experimental_new_converter or self.experimental_new_quantizer:
      flags_modify_model_io_type = quant_mode.flags_modify_model_io_type(
          self.inference_input_type, self.inference_output_type)
      if flags_modify_model_io_type:
        result = _modify_model_io_type(result, **flags_modify_model_io_type)

    if self._sparsify_model():
      result = _mlir_sparsify(result)

    return result

  def get_input_arrays(self):
    """Returns a list of the names of the input tensors.

    Returns:
      List of strings.
    """
    if self._has_valid_tensors():
      return [_get_tensor_name(tensor) for tensor in self._input_tensors]
    else:
      return [name for name, _ in self._input_arrays_with_shape]

  def _has_valid_tensors(self):
    """Checks if the input and output tensors have been initialized.

    Returns:
      Bool.
    """
    return self._input_tensors is not None and self._output_tensors

  def _set_batch_size(self, batch_size):
    """Sets the first dimension of the input tensor to `batch_size`.

    Args:
      batch_size: Batch size for the model. Replaces the first dimension of an
        input size array if undefined. (default 1)

    Raises:
      ValueError: input_tensor is not defined.
    """
    if not self._has_valid_tensors():
      raise ValueError("The batch size cannot be set for this model. Please "
                       "use input_shapes parameter.")

    for tensor in self._input_tensors:
      shape = tensor.shape.as_list()
      if shape[0] is None:
        shape[0] = batch_size
        tensor.set_shape(shape)

  def _is_unknown_shapes_allowed(self):
    # Ophint Converted nodes will need the shapes to be known.
    if _is_ophint_converted(self._graph_def):
      return False

    if not super(TFLiteConverterBaseV1, self)._is_unknown_shapes_allowed():
      return False

    # `conversion_summary_dir` calls TOCO. Unknown shapes are only supported by
    # the MLIR converter.
    if self.conversion_summary_dir:
      logging.warning(
          "`conversion_summary_dir` does not work with unknown shapes. "
          "Graphs with unknown shapes might be different than when this flag "
          "is disabled.")
      return False
    return True

  def _save_conversion_params_metric(self, converter_params):
    # Record V1-specific flags alongside the shared conversion params.
    converter_kwargs = converter_params.copy()
    converter_kwargs.update({
        "output_format": self.output_format,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video,
        "conversion_summary_dir": self.conversion_summary_dir,
        "api_version": 1,
    })
    super(TFLiteConverterBaseV1,
          self)._save_conversion_params_metric(converter_kwargs,
                                               self._graph_def,
                                               self.inference_type,
                                               self.inference_input_type)
class TFLiteSavedModelConverter(TFLiteConverterBaseV1):
  """Converts the given SavedModel into TensorFlow Lite model.

  Attributes:
    saved_model_dir: Directory of the SavedModel.
  """

  def __init__(self,
               saved_model_dir,
               saved_model_tags,
               saved_model_exported_names,
               experimental_debug_info_func=None):
    """Constructor for TFLiteConverter.

    Args:
      saved_model_dir: Directory of the SavedModel.
      saved_model_tags: Set of tags identifying the MetaGraphDef within the
        SavedModel to analyze. All tags in the tag set must be present. (default
        {tf.saved_model.SERVING}).
      saved_model_exported_names: Names to be exported when the saved model
        import path is on. Must contain exactly one signature key.
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.

    Raises:
      ValueError: Invalid arguments.
    """
    super(TFLiteSavedModelConverter,
          self).__init__(experimental_debug_info_func)
    self.saved_model_dir = saved_model_dir
    self._saved_model_tags = saved_model_tags
    self._saved_model_exported_names = saved_model_exported_names

    # Exactly one exported name is supported; it is used as the signature key
    # when freezing the SavedModel. (Previously a default signature key was
    # assigned first, but that value was always overwritten below, so the
    # dead assignment has been removed.)
    if len(self._saved_model_exported_names) != 1:
      raise ValueError("Only support a single signature key.")

    signature_key = self._saved_model_exported_names[0]

    # Freeze the SavedModel into a GraphDef plus its input/output tensors.
    result = _freeze_saved_model(self.saved_model_dir, None, None, None,
                                 self._saved_model_tags, signature_key)
    self._graph_def = result[0]
    self._input_tensors = result[1]
    self._output_tensors = result[2]
    self._parse_saved_model_args()

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    # Wrap the base conversion with attempt/success metrics reporting.
    self._increase_conversion_attempt_metric()
    self._save_conversion_params_metric({})
    result = super(TFLiteSavedModelConverter, self).convert()
    self._increase_conversion_success_metric(result)
    return result
class TFLiteKerasModelConverter(TFLiteConverterBaseV1):
  """Converts the given SavedModel into TensorFlow Lite model."""

  def __init__(self,
               model_file,
               input_arrays=None,
               input_shapes=None,
               output_arrays=None,
               custom_objects=None):
    """Constructor for TFLiteConverter.

    Args:
      model_file: Full filepath of HDF5 file containing the tf.keras model.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
        None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      custom_objects: Dict mapping names (strings) to custom classes or
        functions to be considered during model deserialization. (default None)

    Raises:
      ValueError: Invalid arguments.
    """
    super(TFLiteKerasModelConverter,
          self).__init__(experimental_debug_info_func=None)
    # Handles Keras when Eager mode is enabled.
    if context.executing_eagerly():
      if input_arrays or output_arrays:
        raise ValueError("`input_arrays` and `output_arrays` are unsupported "
                         "with Eager mode. If your model requires any of these "
                         "parameters, please use disable_eager_execution().")

      keras_model = keras_deps.get_load_model_function()(model_file,
                                                         custom_objects)
      function = _trace_model_call(keras_model)
      concrete_func = function.get_concrete_function()

      frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
          concrete_func, lower_control_flow=False)
      _set_tensor_shapes(frozen_func.inputs, input_shapes)
      self._keras_model = keras_model
      self._graph_def = frozen_func.graph.as_graph_def()
      self._input_tensors = frozen_func.inputs
      self._output_tensors = frozen_func.outputs
      self._debug_info_func = _build_debug_info_func(frozen_func.graph)
      return

    # Handles Keras when Eager mode is disabled.
    keras_deps.get_clear_session_function()()
    keras_model = keras_deps.get_load_model_function()(model_file,
                                                       custom_objects)
    sess = keras_deps.get_get_session_function()()

    # Get input and output tensors.
    if input_arrays:
      input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
    else:
      input_tensors = keras_model.inputs

    if output_arrays:
      output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
    else:
      output_tensors = keras_model.outputs
    _set_tensor_shapes(input_tensors, input_shapes)

    graph_def = _freeze_graph(sess, input_tensors, output_tensors)
    self._keras_model = keras_model
    self._graph_def = graph_def
    self._input_tensors = input_tensors
    self._output_tensors = output_tensors
    self._debug_info_func = _build_debug_info_func(sess.graph)

  def _convert_as_saved_model(self):
    """Converts a Keras model as a saved model.

    Returns:
      The converted data in serialized format, or None when the model could
      not be saved as a SavedModel (caller falls back to the Keras pipeline).
    """
    temp_dir = tempfile.mkdtemp()
    try:
      try:
        self._keras_model.save(temp_dir, save_format="tf")
      except Exception:  # pylint: disable=broad-except
        # When storing the given keras model to a saved model is failed, let's
        # use original keras model conversion pipeline.
        return None
      tag_set = set([_tag_constants.SERVING])
      signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
      result = _freeze_saved_model(temp_dir, None, None, None, tag_set,
                                   signature_key)

      self.saved_model_dir = temp_dir
      self._saved_model_tags = tag_set
      self._saved_model_exported_names = [signature_key]
      # May clear self.saved_model_dir when the SavedModel import path
      # cannot be used; in that case this method returns None implicitly.
      self._parse_saved_model_args()
      if self.saved_model_dir:
        self._graph_def = result[0]
        self._input_tensors = result[1]
        self._output_tensors = result[2]
        self._debug_info_func = _build_debug_info_func(result[3])
        # Update conversion params with graph_def.
        self._save_conversion_params_metric({})
        return super(TFLiteKerasModelConverter, self).convert()
    finally:
      # Always clean up the temporary SavedModel directory.
      shutil.rmtree(temp_dir, True)

  def convert(self):
    """Converts a Keras model based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    self._increase_conversion_attempt_metric()
    self._save_conversion_params_metric({})
    # Preferred path: round-trip through a SavedModel; fall back to the
    # frozen-graph pipeline prepared in __init__ when that fails.
    saved_model_convert_result = self._convert_as_saved_model()
    if saved_model_convert_result:
      self._increase_conversion_success_metric(saved_model_convert_result)
      return saved_model_convert_result

    result = super(TFLiteKerasModelConverter, self).convert()
    self._increase_conversion_success_metric(result)
    return result
class TFLiteFrozenGraphConverter(TFLiteConverterBaseV1):
  """Converts the given frozen graph def into TensorFlow Lite model."""

  def __init__(self,
               graph_def,
               input_tensors,
               output_tensors,
               input_arrays_with_shape=None,
               output_arrays=None,
               experimental_debug_info_func=None):
    """Constructor for TFLiteConverter.

    Args:
      graph_def: Frozen TensorFlow GraphDef.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
      input_arrays_with_shape: Tuple of strings representing input tensor names
        and list of integers representing input shapes
        (e.g., [("foo", [1, 16, 16, 3])]). Use only when graph cannot be loaded
        into TensorFlow and when `input_tensors` and `output_tensors` are
        None. (default None)
      output_arrays: List of output tensors to freeze graph with. Use only when
        graph cannot be loaded into TensorFlow and when `input_tensors` and
        `output_tensors` are None. (default None)
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.

    Raises:
      ValueError: Invalid arguments.
    """
    super(TFLiteFrozenGraphConverter,
          self).__init__(experimental_debug_info_func)
    self._graph_def = graph_def
    self._input_tensors = input_tensors
    self._output_tensors = output_tensors
    if not self._has_valid_tensors():
      # The model cannot be loaded into TensorFlow: fall back to plain
      # name/shape attributes, which then become mandatory.
      if not input_arrays_with_shape or not output_arrays:
        raise ValueError("If input_tensors and output_tensors are None, both "
                         "input_arrays_with_shape and output_arrays must be "
                         "defined.")
      self._input_arrays_with_shape = input_arrays_with_shape
      self._output_arrays = output_arrays
    # Warn when redundant (and therefore ignored) fallback options are given.
    if input_tensors is not None and input_arrays_with_shape is not None:
      logging.warning("input_arrays_with_shape will be ignored when both the "
                      "given input_tensors and input_arrays_with_shape are "
                      "not None.")
    if output_tensors is not None and output_arrays is not None:
      logging.warning("output_arrays will be ignored when both the given "
                      "output_tensors and output_arrays are not None.")

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    self._increase_conversion_attempt_metric()
    self._save_conversion_params_metric({})
    converted = super(TFLiteFrozenGraphConverter, self).convert()
    self._increase_conversion_success_metric(converted)
    return converted
@_tf_export(v1=["lite.TFLiteConverter"])
class TFLiteConverter(TFLiteFrozenGraphConverter):
  """Convert a TensorFlow model into `output_format`.
  This is used to convert from a TensorFlow GraphDef, SavedModel or tf.keras
  model into either a TFLite FlatBuffer or graph visualization.
  Attributes:
    optimizations: Experimental flag, subject to change. Set of optimizations to
      apply. e.g {tf.lite.Optimize.DEFAULT}. (default None, must be None or a
      set of values of type `tf.lite.Optimize`)
    representative_dataset: A generator function used for integer quantization
      where each generated sample has the same order, type and shape as the
      inputs to the model. Usually, this is a small subset of a few hundred
      samples randomly chosen, in no particular order, from the training or
      evaluation dataset. This is an optional attribute, but required for full
      integer quantization, i.e, if `tf.int8` is the only supported type in
      `target_spec.supported_types`. Refer to `tf.lite.RepresentativeDataset`.
      (default None)
    target_spec: Experimental flag, subject to change. Specifications of target
      device, including supported ops set, supported types and a set of user's
      defined TensorFlow operators required in the TensorFlow Lite runtime.
      Refer to `tf.lite.TargetSpec`.
    inference_type: Data type of numeric arrays, excluding the input layer.
      (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    inference_input_type: Data type of the numeric arrays in the input layer. If
      `inference_input_type` is in {tf.int8, tf.uint8}, then
      `quantized_input_stats` must be provided. (default is the value assigned
      to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})
    inference_output_type: Data type of the numeric arrays in the output layer.
      (default is the value assigned to `inference_type`, must be in
      {tf.float32, tf.int8, tf.uint8})
    quantized_input_stats: Map of input tensor names to a tuple of floats
      representing the mean and standard deviation of the training data.
      (e.g., {"foo" : (0., 1.)}). Required if `inference_input_type` is tf.int8
      or tf.uint8. (default None)
    default_ranges_stats: Tuple of integers (min, max) representing range values
      for all numeric arrays without a specified range. Intended for
      experimenting with quantization via "dummy quantization". (default None)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When False any unknown operation is an error. When True, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver. (default
      False)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    output_format: Output file format. (default
      tf.compat.v1.lite.constants.TFLITE, must be in
      {tf.compat.v1.lite.constants.TFLITE,
      tf.compat.v1.lite.constants.GRAPHVIZ_DOT})
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      `output_format=tf.compat.v1.lite.constants.GRAPHVIZ_DOT` in order to keep
      the requirements of the output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the GraphViz .dot
      files after every graph transformation. Requires the `dump_graphviz_dir`
      flag to be specified. (default False)
    conversion_summary_dir: Full path of the directory to store conversion logs.
      (default None)
    target_ops: Deprecated. Please use `target_spec.supported_ops` instead.
    post_training_quantize: Deprecated. Please use `optimizations` instead and
      set it to `{tf.lite.Optimize.DEFAULT}`. (default False)
    experimental_new_converter: Experimental flag, subject to change. Enables
      MLIR-based conversion instead of TOCO conversion. (default True)
    experimental_new_quantizer: Experimental flag, subject to change. Enables
      MLIR-based quantization conversion instead of Flatbuffer-based conversion.
      (default True)
  Example usage:
  ```python
  # Converting a GraphDef from session.
  converter = tf.compat.v1.lite.TFLiteConverter.from_session(
    sess, in_tensors, out_tensors)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)
  # Converting a GraphDef from file.
  converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file, input_arrays, output_arrays)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)
  # Converting a SavedModel.
  converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(
      saved_model_dir)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)
  # Converting a tf.keras model.
  converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(
      keras_model)
  tflite_model = converter.convert()
  open("converted_model.tflite", "wb").write(tflite_model)
  ```
  """
  # pylint: disable=useless-super-delegation
  def __init__(self,
               graph_def,
               input_tensors,
               output_tensors,
               input_arrays_with_shape=None,
               output_arrays=None,
               experimental_debug_info_func=None):
    """Constructor for TFLiteConverter.
    Args:
      graph_def: Frozen TensorFlow GraphDef.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
      input_arrays_with_shape: Tuple of strings representing input tensor names
        and list of integers representing input shapes
        (e.g., [("foo", [1, 16, 16, 3])]). Use only when graph cannot be loaded
        into TensorFlow and when `input_tensors` and `output_tensors` are
        None. (default None)
      output_arrays: List of output tensors to freeze graph with. Use only when
        graph cannot be loaded into TensorFlow and when `input_tensors` and
        `output_tensors` are None. (default None)
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.
    Raises:
      ValueError: Invalid arguments.
    """
    super(TFLiteConverter,
          self).__init__(graph_def, input_tensors, output_tensors,
                         input_arrays_with_shape, output_arrays,
                         experimental_debug_info_func)
  @classmethod
  def from_session(cls, sess, input_tensors, output_tensors):
    """Creates a TFLiteConverter class from a TensorFlow Session.
    Args:
      sess: TensorFlow Session.
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
    Returns:
      TFLiteConverter class.
    """
    graph_def = _freeze_graph(sess, input_tensors, output_tensors)
    return cls(
        graph_def,
        input_tensors,
        output_tensors,
        experimental_debug_info_func=_build_debug_info_func(sess.graph))
  @classmethod
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Creates a TFLiteConverter class from a file containing a frozen GraphDef.
    Args:
      graph_def_file: Full filepath of file containing frozen GraphDef.
      input_arrays: List of input tensors to freeze graph with.
      output_arrays: List of output tensors to freeze graph with.
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
          None}). (default None)
    Returns:
      TFLiteConverter class.
    Raises:
      IOError:
        File not found.
        Unable to parse input file.
      ValueError:
        The graph is not frozen.
        input_arrays or output_arrays contains an invalid tensor name.
        input_shapes is not correctly defined when required
    """
    with _ops.Graph().as_default():
      with _session.Session() as sess:
        # Read GraphDef from file.
        if not _file_io.file_exists(graph_def_file):
          raise IOError("File '{0}' does not exist.".format(graph_def_file))
        with _file_io.FileIO(graph_def_file, "rb") as f:
          file_content = f.read()
        # First try to parse the file as a binary GraphDef proto.
        try:
          graph_def = _graph_pb2.GraphDef()
          graph_def.ParseFromString(file_content)
        except (_text_format.ParseError, DecodeError):
          # Fall back to parsing the file as a text-format GraphDef.
          try:
            print("Ignore 'tcmalloc: large alloc' warnings.")
            if not isinstance(file_content, str):
              if PY2:
                file_content = six.ensure_binary(file_content, "utf-8")
              else:
                file_content = six.ensure_text(file_content, "utf-8")
            graph_def = _graph_pb2.GraphDef()
            _text_format.Merge(file_content, graph_def)
          except (_text_format.ParseError, DecodeError):
            raise IOError(
                "Unable to parse input file '{}'.".format(graph_def_file))
        # Handles models with custom TFLite ops that cannot be resolved in
        # TensorFlow.
        load_model_in_session = True
        try:
          _import_graph_def(graph_def, name="")
        except _NotFoundError:
          load_model_in_session = False
        if load_model_in_session:
          # Check if graph is frozen.
          if not _is_frozen_graph(sess):
            raise ValueError("Please freeze the graph using freeze_graph.py.")
          # Get input and output tensors.
          input_tensors = _get_tensors_from_tensor_names(
              sess.graph, input_arrays)
          output_tensors = _get_tensors_from_tensor_names(
              sess.graph, output_arrays)
          _set_tensor_shapes(input_tensors, input_shapes)
          return cls(sess.graph_def, input_tensors, output_tensors)
        else:
          # The graph could not be imported into TensorFlow, so shapes must be
          # supplied explicitly for every input array.
          if not input_shapes:
            raise ValueError("input_shapes must be defined for this model.")
          if set(input_arrays) != set(input_shapes.keys()):
            raise ValueError("input_shapes must contain a value for each item "
                             "in input_array.")
          input_arrays_with_shape = [
              (name, input_shapes[name]) for name in input_arrays
          ]
          return cls(
              graph_def,
              input_tensors=None,
              output_tensors=None,
              input_arrays_with_shape=input_arrays_with_shape,
              output_arrays=output_arrays)
  @classmethod
  def from_saved_model(cls,
                       saved_model_dir,
                       input_arrays=None,
                       input_shapes=None,
                       output_arrays=None,
                       tag_set=None,
                       signature_key=None):
    """Creates a TFLiteConverter class from a SavedModel.
    Args:
      saved_model_dir: SavedModel directory to convert.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
          None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default
        {tf.saved_model.SERVING})
      signature_key: Key identifying SignatureDef containing inputs and outputs.
        (default tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
    Returns:
      TFLiteConverter class.
    """
    if tag_set is None:
      tag_set = set([_tag_constants.SERVING])
    if signature_key is None:
      signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    # Prefer the dedicated SavedModel converter when it could successfully
    # parse the SavedModel; otherwise freeze the graph and build a
    # TFLiteConverter from the frozen GraphDef.
    saved_model_converter = TFLiteSavedModelConverter(saved_model_dir, tag_set,
                                                      [signature_key])
    if saved_model_converter.saved_model_dir:
      return saved_model_converter
    result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
                                 output_arrays, tag_set, signature_key)
    return cls(
        graph_def=result[0],
        input_tensors=result[1],
        output_tensors=result[2],
        experimental_debug_info_func=_build_debug_info_func(result[3]))
  @classmethod
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None,
                            custom_objects=None):
    """Creates a TFLiteConverter class from a tf.keras model file.
    Args:
      model_file: Full filepath of HDF5 file containing the tf.keras model.
      input_arrays: List of input tensors to freeze graph with. Uses input
        arrays from SignatureDef when none are provided. (default None)
      input_shapes: Dict of strings representing input tensor names to list of
        integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
        Automatically determined when input shapes is None (e.g., {"foo" :
          None}). (default None)
      output_arrays: List of output tensors to freeze graph with. Uses output
        arrays from SignatureDef when none are provided. (default None)
      custom_objects: Dict mapping names (strings) to custom classes or
        functions to be considered during model deserialization. (default None)
    Returns:
      TFLiteConverter class.
    """
    return TFLiteKerasModelConverter(model_file, input_arrays, input_shapes,
                                     output_arrays, custom_objects)
  # pylint: disable=useless-super-delegation
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.
    Returns:
      The converted data in serialized format. Either a TFLite Flatbuffer or a
      Graphviz graph depending on value in `output_format`.
    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    return super(TFLiteConverter, self).convert()
@_tf_export(v1=["lite.TocoConverter"])
class TocoConverter(object):
  """Convert a TensorFlow model into `output_format` using TOCO.

  This class has been deprecated. Please use `lite.TFLiteConverter` instead.
  """

  @classmethod
  @_deprecation.deprecated(None,
                           "Use `lite.TFLiteConverter.from_session` instead.")
  def from_session(cls, sess, input_tensors, output_tensors):
    """Deprecated alias for `TFLiteConverter.from_session`."""
    return TFLiteConverter.from_session(sess, input_tensors, output_tensors)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_frozen_graph` instead.")
  def from_frozen_graph(cls,
                        graph_def_file,
                        input_arrays,
                        output_arrays,
                        input_shapes=None):
    """Deprecated alias for `TFLiteConverter.from_frozen_graph`."""
    return TFLiteConverter.from_frozen_graph(graph_def_file,
                                             input_arrays,
                                             output_arrays,
                                             input_shapes=input_shapes)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_saved_model` instead.")
  def from_saved_model(cls,
                       saved_model_dir,
                       input_arrays=None,
                       input_shapes=None,
                       output_arrays=None,
                       tag_set=None,
                       signature_key=None):
    """Deprecated alias for `TFLiteConverter.from_saved_model`."""
    return TFLiteConverter.from_saved_model(saved_model_dir,
                                            input_arrays=input_arrays,
                                            input_shapes=input_shapes,
                                            output_arrays=output_arrays,
                                            tag_set=tag_set,
                                            signature_key=signature_key)

  @classmethod
  @_deprecation.deprecated(
      None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
  def from_keras_model_file(cls,
                            model_file,
                            input_arrays=None,
                            input_shapes=None,
                            output_arrays=None):
    """Deprecated alias for `TFLiteConverter.from_keras_model_file`."""
    return TFLiteConverter.from_keras_model_file(model_file,
                                                 input_arrays=input_arrays,
                                                 input_shapes=input_shapes,
                                                 output_arrays=output_arrays)
| 42.150175 | 125 | 0.702745 |
c74f62328b4f6dca2b8b49f8233d22e7fa310dab | 913 | py | Python | object.py | ZhongXinWang/python | 4cf3ecdc9d9e811e777c6d8408a8319097cfdec3 | [
"Apache-2.0"
] | null | null | null | object.py | ZhongXinWang/python | 4cf3ecdc9d9e811e777c6d8408a8319097cfdec3 | [
"Apache-2.0"
] | null | null | null | object.py | ZhongXinWang/python | 4cf3ecdc9d9e811e777c6d8408a8319097cfdec3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
# Section banner (Chinese: "object-oriented").
print('---------------面向对象------------------')
class Person(object):
    """A person with a public name and a private (name-mangled) score."""

    def __init__(self, name, score):
        self.name = name
        self.__score = score  # stored as _Person__score via name mangling

    def toString(self):
        """Return a displayable ``(name, ":", score)`` tuple."""
        return (self.name, ":", self.__score)

    def setScore(self, score):
        """Replace the private score value."""
        self.__score = score
class Student(object):
    """A student with private (name-mangled) name and gender fields."""

    def __init__(self, name, gender):
        self.__gender = gender
        self.__name = name

    def getGender(self):
        """Accessor for the private gender field."""
        return self.__gender

    def setGender(self, gender):
        """Mutator for the private gender field."""
        self.__gender = gender
if __name__ == '__main__':
    p = Person('张三',90)
    print(p.toString())
    p.name='修改为李四'
    # __score is name-mangled private: assigning p.__score = 40 from outside
    # would not change it (note the two leading underscores), so use the setter.
    p.setScore(40)
    print(p.toString())
    # Test:
    bart = Student('Bart', 'male')
    if bart.getGender() != 'male':
        print('测试失败!')
    else:
        bart.setGender('female')
        if bart.getGender() != 'female':
            print('测试失败!')
        else:
            print('测试成功!')
| 21.232558 | 46 | 0.619934 |
f6fa341dbe765b78addfacf64b3a9795c95f2eff | 6,973 | py | Python | sqlserver/tests/test_connection.py | ganeshkumarsv/integrations-core | 55cce423d171a65f87760ff903214af7a0e2a4e4 | [
"BSD-3-Clause"
] | null | null | null | sqlserver/tests/test_connection.py | ganeshkumarsv/integrations-core | 55cce423d171a65f87760ff903214af7a0e2a4e4 | [
"BSD-3-Clause"
] | null | null | null | sqlserver/tests/test_connection.py | ganeshkumarsv/integrations-core | 55cce423d171a65f87760ff903214af7a0e2a4e4 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import re
import mock
import pyodbc
import pytest
from datadog_checks.base import ConfigurationError
from datadog_checks.sqlserver import SQLServer
from datadog_checks.sqlserver.connection import Connection
from .common import CHECK_NAME
from .utils import not_windows_ci, windows_ci
# Mark every test in this module as a unit test.
pytestmark = pytest.mark.unit
@pytest.mark.parametrize(
    'connector, param',
    [
        pytest.param('odbc', 'adoprovider', id='Provider is ignored when using odbc'),
        pytest.param('adodbapi', 'dsn', id='DSN is ignored when using adodbapi'),
        pytest.param('adodbapi', 'driver', id='Driver is ignored when using adodbapi'),
    ],
)
def test_will_warn_parameters_for_the_wrong_connection(instance_sql_defaults, connector, param):
    # A connector-specific option supplied for the *other* connector must not
    # fail validation; it should only emit a single warning.
    instance_sql_defaults.update({'connector': connector, param: 'foo'})
    connection = Connection({}, instance_sql_defaults, None)
    connection.log = mock.MagicMock()
    connection._connection_options_validation('somekey', 'somedb')
    connection.log.warning.assert_called_once_with(
        "%s option will be ignored since %s connection is used", param, connector
    )
@pytest.mark.parametrize(
    'connector, cs, param',
    [
        pytest.param('odbc', 'DSN', 'dsn', id='Cannot define DSN twice'),
        pytest.param('odbc', 'DRIVER', 'driver', id='Cannot define DRIVER twice'),
        # Fixed copy-paste error: this case exercises SERVER/host, but its id
        # previously read 'Cannot define DRIVER twice'.
        pytest.param('odbc', 'SERVER', 'host', id='Cannot define SERVER twice'),
        pytest.param('odbc', 'UID', 'username', id='Cannot define UID twice'),
        pytest.param('odbc', 'PWD', 'password', id='Cannot define PWD twice'),
        pytest.param('adodbapi', 'PROVIDER', 'adoprovider', id='Cannot define PROVIDER twice'),
        pytest.param('adodbapi', 'Data Source', 'host', id='Cannot define Data Source twice'),
        pytest.param('adodbapi', 'User ID', 'username', id='Cannot define User ID twice'),
        pytest.param('adodbapi', 'Password', 'password', id='Cannot define Password twice'),
    ],
)
def test_will_fail_for_duplicate_parameters(instance_sql_defaults, connector, cs, param):
    # Supplying the same connection option both as a configuration option and
    # inside the raw connection string must raise a ConfigurationError.
    instance_sql_defaults.update({'connector': connector, param: 'foo', 'connection_string': cs + "=foo"})
    connection = Connection({}, instance_sql_defaults, None)
    match = (
        "%s has been provided both in the connection string and as a configuration option (%s), "
        "please specify it only once" % (cs, param)
    )
    with pytest.raises(ConfigurationError, match=re.escape(match)):
        connection._connection_options_validation('somekey', 'somedb')
@pytest.mark.parametrize(
    'connector, cs',
    [
        pytest.param('adodbapi', 'DSN', id='Cannot define DSN for adodbapi'),
        pytest.param('adodbapi', 'DRIVER', id='Cannot define DRIVER for adodbapi'),
        pytest.param('adodbapi', 'SERVER', id='Cannot define DRIVER for adodbapi'),
        pytest.param('adodbapi', 'UID', id='Cannot define UID for adodbapi'),
        pytest.param('adodbapi', 'PWD', id='Cannot define PWD for adodbapi'),
        pytest.param('odbc', 'PROVIDER', id='Cannot define PROVIDER for odbc'),
        pytest.param('odbc', 'Data Source', id='Cannot define Data source for odbc'),
        pytest.param('odbc', 'User ID', id='Cannot define User ID for odbc'),
        pytest.param('odbc', 'Password', id='Cannot define Password for odbc'),
    ],
)
def test_will_fail_for_wrong_parameters_in_the_connection_string(instance_sql_defaults, connector, cs):
    # Connection-string keys are connector-specific; a key belonging to the
    # other connector must raise a ConfigurationError naming both connectors.
    instance_sql_defaults.update({'connector': connector, 'connection_string': cs + '=foo'})
    other_connector = 'odbc' if connector != 'odbc' else 'adodbapi'
    connection = Connection({}, instance_sql_defaults, None)
    match = (
        "%s has been provided in the connection string. "
        "This option is only available for %s connections, however %s has been selected"
        % (cs, other_connector, connector)
    )
    with pytest.raises(ConfigurationError, match=re.escape(match)):
        connection._connection_options_validation('somekey', 'somedb')
@not_windows_ci
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_query_timeout(aggregator, dd_run_check, instance_docker):
    # Linux/docker flavor of the shared query-timeout scenario.
    _run_test_query_timeout(aggregator, dd_run_check, instance_docker)
@windows_ci
@pytest.mark.integration
def test_query_timeout_windows(aggregator, dd_run_check, instance_sql_msoledb):
    # Windows/MSOLEDB flavor of the shared query-timeout scenario.
    _run_test_query_timeout(aggregator, dd_run_check, instance_sql_msoledb)
def _run_test_query_timeout(aggregator, dd_run_check, instance):
    """Shared body for the query-timeout tests: a 1s command_timeout must let a
    trivial query through but abort a 2s ``waitfor delay`` with a timeout error.
    """
    instance['command_timeout'] = 1
    check = SQLServer(CHECK_NAME, {}, [instance])
    check.initialize_connection()
    with check.connection.open_managed_default_connection():
        with check.connection.get_managed_cursor() as cursor:
            # A trivial query must complete well within the 1s timeout.
            cursor.execute("select 1")
            assert cursor.fetchall(), "should have a result here"
            with pytest.raises(Exception) as e:
                cursor.execute("waitfor delay '00:00:02'")
            # `e` is pytest's ExceptionInfo wrapper; the raised driver
            # exception is `e.value`. The previous code compared the wrapper
            # itself against the driver exception types, which can never match.
            exc = e.value
            if isinstance(exc, pyodbc.OperationalError):
                assert 'timeout' in "".join(exc.args).lower(), "must be a timeout"
            else:
                import adodbapi

                assert type(exc) == adodbapi.apibase.DatabaseError
                assert 'timeout' in "".join(exc.args).lower(), "must be a timeout"
@not_windows_ci
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_connection_cleanup(instance_docker):
    # The managed-connection context managers must always remove the pooled
    # connection from `_conns`, whether the body succeeds or raises.
    check = SQLServer(CHECK_NAME, {}, [instance_docker])
    check.initialize_connection()
    # regular operation
    with check.connection.open_managed_default_connection():
        assert len(check.connection._conns) == 1
        with check.connection.get_managed_cursor() as cursor:
            cursor.execute("select 1")
        assert len(check.connection._conns) == 1
    assert len(check.connection._conns) == 0, "connection should have been closed"
    # db exception: invalid SQL raised by the server
    with pytest.raises(Exception) as e:
        with check.connection.open_managed_default_connection():
            assert len(check.connection._conns) == 1
            with check.connection.get_managed_cursor() as cursor:
                assert len(check.connection._conns) == 1
                cursor.execute("gimme some data")
    assert "incorrect syntax" in str(e).lower()
    assert len(check.connection._conns) == 0, "connection should have been closed"
    # application exception raised inside the managed block
    with pytest.raises(Exception) as e:
        with check.connection.open_managed_default_connection():
            assert len(check.connection._conns) == 1
            with check.connection.get_managed_cursor():
                assert len(check.connection._conns) == 1
                raise Exception("oops")
    assert "oops" in str(e)
    assert len(check.connection._conns) == 0, "connection should have been closed"
980ad55bf6fe6c9339d537913ea75a0ff7664e85 | 36 | py | Python | tests/__init__.py | AaronLaw/crypto | 92fb012daf1a770a8481522ef3c683494696f227 | [
"MIT"
] | null | null | null | tests/__init__.py | AaronLaw/crypto | 92fb012daf1a770a8481522ef3c683494696f227 | [
"MIT"
] | null | null | null | tests/__init__.py | AaronLaw/crypto | 92fb012daf1a770a8481522ef3c683494696f227 | [
"MIT"
] | null | null | null | """Unit test package for crypto."""
| 18 | 35 | 0.666667 |
61603c37f778f56256190b193c8f02b80e005c6b | 85,116 | py | Python | brian2/spatialneuron/morphology.py | Kyzarok/SNNProject | 14b555e221dbdd5100cb4f6333e49030423462ea | [
"BSD-2-Clause"
] | 2 | 2020-03-20T13:30:19.000Z | 2020-03-20T13:30:57.000Z | brian2/spatialneuron/morphology.py | deep-introspection/brian2 | 6c50e3a22d0e83a2b75c5bcf6f7c1d889311fca1 | [
"BSD-2-Clause"
] | null | null | null | brian2/spatialneuron/morphology.py | deep-introspection/brian2 | 6c50e3a22d0e83a2b75c5bcf6f7c1d889311fca1 | [
"BSD-2-Clause"
] | null | null | null | '''
Neuronal morphology module.
This module defines classes to load and build neuronal morphologies.
'''
import abc
import numbers
from abc import abstractmethod
from collections import OrderedDict, defaultdict, namedtuple
import os
from brian2.units.allunits import meter
from brian2.utils.logger import get_logger
from brian2.units.stdunits import um
from brian2.units.fundamentalunits import (have_same_dimensions, Quantity,
check_units, DimensionMismatchError)
from brian2 import numpy as np
# Module-level logger for this file.
logger = get_logger(__name__)

__all__ = ['Morphology', 'Section', 'Cylinder', 'Soma']

# Lightweight record describing one point of a morphology tree (e.g. as read
# from an SWC-like description): 3D position, diameter, and the parent index
# plus child indices linking the tree together.
Node = namedtuple('Node',
                  field_names='index,comp_name,x,y,z,diameter,parent,children')
def _to_meters(value):
    '''
    Turn a floating point value (or array) into a `Quantity` with dimensions
    of meter; ``None`` is passed through unchanged.
    '''
    if value is None:
        return None
    return Quantity(value, dim=meter.dim)
def _from_morphology(variable, i, j):
    '''
    Return the slice ``variable[i:j]`` of a coordinate array from a main
    morphology (used by `SubMorphology`); a ``None`` array stays ``None``.
    '''
    return None if variable is None else variable[i:j]
class MorphologyIndexWrapper(object):
    '''
    A simpler version of `~brian2.groups.group.IndexWrapper`, not allowing for
    string indexing (`Morphology` is not a `Group`). It allows to use
    ``morphology.indices[...]`` instead of ``morphology[...]._indices()``.
    '''
    def __init__(self, morphology):
        # The wrapped Morphology (or SubMorphology) whose indices are exposed.
        self.morphology = morphology
    def __getitem__(self, item):
        # NOTE(review): `basestring` only exists on Python 2 -- presumably a
        # py2/py3 compatibility shim provides it elsewhere; verify before
        # running this on Python 3.
        if isinstance(item, basestring):
            raise NotImplementedError(('Morphologies do not support string '
                                       'indexing'))
        assert isinstance(self.morphology, (SubMorphology, Morphology))
        # Delegate the actual index computation to the wrapped morphology.
        return self.morphology._indices(item)
def _calc_start_idx(section):
    '''
    Calculate the absolute start index that will be used by a flattened
    representation.
    '''
    # Climb the parent links until we reach the root of the tree.
    root = section
    parent = root._parent
    while parent is not None:
        root, parent = parent, parent._parent
    # Descend from the root, accumulating compartment counts until the
    # given section is reached.
    index, located = _find_start_index(root, section)
    assert located
    return index
def _find_start_index(current, target_section, index=0):
    '''
    Depth-first search for ``target_section``, accumulating the number of
    compartments (``n``) of every section that precedes it in the flattened
    order. Returns an ``(index, found)`` tuple.
    '''
    if current == target_section:
        return index, True
    # All compartments of the current section come before its children.
    index += current.n
    for child in current.children:
        if child == target_section:
            return index, True
        index, hit = _find_start_index(child, target_section, index)
        if hit:
            return index, True
    return index, False
class Topology(object):
    '''
    A representation of the topology of a `Morphology`. Has a useful string
    representation, inspired by NEURON's ``topology`` function.
    '''
    def __init__(self, morphology):
        # The Morphology whose tree structure is rendered.
        self.morphology = morphology
    def __str__(self):
        # TODO: Make sure that the shown compartments do not get out of hand
        # A divisor > 1 would shrink the number of dashes drawn per section.
        divisor = 1
        return Topology._str_topology(self.morphology, compartments_divisor=divisor)
    @staticmethod
    def _str_topology(morphology, indent=0, named_path='',
                      compartments_divisor=1, parent=None):
        '''
        A simple string-based representation of a morphology. Inspired by
        NEURON's ``topology`` function.
        '''
        # Start each section's line at a column proportional to its depth.
        description = ' '*indent
        # Draw one dash per `compartments_divisor` compartments, at least one.
        length = max([1, morphology.n//compartments_divisor])
        if parent is not None:
            # Mark the attachment point to the parent section.
            description += '`'
        if isinstance(morphology, Soma):
            # A soma is drawn as a circle instead of dashes.
            description += '( )'
        else:
            description += '-' * length
            description += '|'
        if len(named_path) == 0:
            description += ' [root] \n'
        else:
            description += ' ' + named_path + '\n'
        # Recurse into the children, indenting past this section's drawing.
        for child in morphology.children:
            name = morphology.children.name(child)
            description += Topology._str_topology(child,
                                                  indent=indent+2+length,
                                                  named_path=named_path+'.'+name,
                                                  compartments_divisor=compartments_divisor,
                                                  parent=morphology)
        return description
    __repr__ = __str__
def _rotate(vec, axis, angle):
    '''
    Rotate a vector around an arbitrary axis.

    Parameters
    ----------
    vec : `ndarray`
        The vector to rotate.
    axis : `ndarray`
        The axis around which the vector should be rotated.
    angle : float
        The rotation angle (in radians).

    Returns
    -------
    rotated : `ndarray`
        The rotated vector.
    '''
    # Rodrigues-style combination of the three component terms.
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    parallel = axis * np.dot(axis, vec)
    return vec * cos_a - np.cross(axis, vec) * sin_a + parallel * (1 - cos_a)
def _perturb(vec, sigma):
    '''
    Randomly tilt ``vec`` by an exponentially distributed angle (scale
    ``sigma``) around a random direction; ``sigma == 0`` returns ``vec``
    unchanged.
    '''
    if sigma == 0:
        return vec
    # Pick a reference vector in the x-y plane.
    # NOTE(review): [vec[1], vec[0], 0] is not actually orthogonal to `vec`
    # (their dot product is 2*vec[0]*vec[1]) -- kept as-is to preserve the
    # original behavior; verify whether [vec[1], -vec[0], 0] was intended.
    if vec[1] != 0 or vec[0] != 0:
        orthogonal = np.hstack([vec[1], vec[0], 0])
    else:
        # Special case for vectors along the z axis, e.g. [0, 0, 1].
        orthogonal = np.array([1, 0, 0])
    # Spin the reference vector to a uniformly random position around `vec`,
    # then tilt `vec` around it by an exponentially distributed angle.
    orthogonal = _rotate(orthogonal, vec, np.random.rand() * np.pi * 2)
    tilt_angle = np.random.exponential(sigma, 1)
    return _rotate(vec, orthogonal, tilt_angle)
def _add_coordinates(orig_morphology, root=None, parent=None, name=None,
                     section_randomness=0.0, compartment_randomness=0.0,
                     n_th_child=0, total_children=0,
                     overwrite_existing=False):
    '''
    Recursive helper for `Morphology.generate_coordinates`: create a copy of
    ``orig_morphology`` (and, via recursion, its whole subtree) in which every
    section has explicit (x, y, z) coordinates. Sections that already have
    coordinates are copied unchanged unless ``overwrite_existing`` is set.
    Returns the newly created section (the tree root on the outermost call).
    '''
    # Note that in the following, all values are without physical units
    # The new direction is based on the direction of the parent section
    if parent is None:
        section_dir = np.array([0, 0, 0])
    else:
        section_dir = np.hstack([np.asarray(parent.end_x[-1] - parent.start_x[0]),
                                 np.asarray(parent.end_y[-1] - parent.start_y[0]),
                                 np.asarray(parent.end_z[-1] - parent.start_z[0])])
    parent_dir_norm = np.sqrt(np.sum(section_dir**2))
    if parent_dir_norm != 0:
        section_dir /= parent_dir_norm
    else:
        # Zero-length parent direction (e.g. parent is a point): no preferred
        # direction to continue in
        section_dir = np.array([0, 0, 0])
    if not overwrite_existing and orig_morphology.x is not None:
        # Keep the existing coordinates untouched
        section = orig_morphology.copy_section()
    elif isinstance(orig_morphology, Soma):
        # No perturbation for the soma
        # NOTE(review): a non-root soma is placed at the *unit* direction
        # vector (in meters) -- looks suspicious, verify against callers
        section = Soma(diameter=orig_morphology.diameter,
                       x=section_dir[0]*meter,
                       y=section_dir[1]*meter,
                       z=section_dir[2]*meter)
    else:
        if np.sum(section_dir**2) == 0:
            # We don't have any direction to base this section on (most common
            # case is that the root section is a soma)
            # We stay in the x-y plane and distribute all children in a 360 degree
            # circle around (0, 0, 0)
            section_dir = np.array([1, 0, 0])
            rotation_axis = np.array([0, 0, 1])
            angle_increment = 2*np.pi/total_children
            rotation_angle = np.pi/2 + angle_increment * n_th_child
            section_dir = _rotate(section_dir, rotation_axis, rotation_angle)
        else:
            if section_randomness == 0 and section_dir[2] == 0:  # If we are in the x-y plane, stay there
                rotation_axis = np.array([0, 0, 1])
            else:
                rotation_axis = np.array([-section_dir[1], section_dir[2], 0])
            if section_randomness == 0:
                # Deterministic layout: spread the children in a fan of
                # equally spaced angles around the parent direction
                angle_increment = np.pi/(total_children + 1)
                rotation_angle = -np.pi/2 + angle_increment * (n_th_child + 1)
                section_dir = _rotate(section_dir, rotation_axis, rotation_angle)
        if section_randomness > 0:
            # Rotate randomly
            section_dir = _perturb(section_dir, section_randomness)
        section_dir_norm = np.sqrt(np.sum(section_dir**2))
        section_dir /= section_dir_norm
        # For a soma, we let child sections begin at the surface of the sphere
        if isinstance(parent, Soma):
            origin = parent.diameter/2*section_dir
        else:
            origin = (0, 0, 0)*um
        # One point per compartment boundary (n compartments -> n+1 points)
        coordinates = np.zeros((orig_morphology.n + 1, 3))*meter
        start_coords = origin
        coordinates[0, :] = origin
        # Perturb individual compartments as well
        for idx, length in enumerate(orig_morphology.length):
            compartment_dir = _perturb(section_dir, compartment_randomness)
            compartment_dir_norm = np.sqrt(np.sum(compartment_dir**2))
            compartment_dir /= compartment_dir_norm
            current_coords = start_coords + length*compartment_dir
            coordinates[idx + 1, :] = current_coords
            start_coords = current_coords
        if isinstance(orig_morphology, Cylinder) and compartment_randomness == 0:
            # Without per-compartment perturbation a cylinder stays a straight
            # cylinder, so only start and end points are needed
            section = Cylinder(n=orig_morphology.n,
                               diameter=orig_morphology.diameter[0],
                               x=coordinates[[0, -1], 0],
                               y=coordinates[[0, -1], 1],
                               z=coordinates[[0, -1], 2],
                               type=orig_morphology.type)
        elif isinstance(orig_morphology, Section):
            section = Section(n=orig_morphology.n,
                              diameter=np.hstack([orig_morphology.start_diameter[0],
                                                  orig_morphology.end_diameter])*meter,
                              x=coordinates[:, 0],
                              y=coordinates[:, 1],
                              z=coordinates[:, 2],
                              type=orig_morphology.type)
        else:
            raise NotImplementedError(('Do not know how to deal with section of '
                                       'type %s.' % type(orig_morphology)))
    if parent is None:
        root = section
    else:
        parent.children.add(name, section)
    # Recurse into all subtrees, linking them to the newly created copy
    for idx, child in enumerate(orig_morphology.children):
        _add_coordinates(child, root=root, parent=section,
                         name=orig_morphology.children.name(child),
                         n_th_child=idx, total_children=len(orig_morphology.children),
                         section_randomness=section_randomness,
                         compartment_randomness=compartment_randomness,
                         overwrite_existing=overwrite_existing)
    return section
class Children(object):
    '''
    Helper class to represent the children (sub trees) of a section. Can be
    used like a dictionary (mapping names to `Morphology` objects), but iterates
    over the values (sub trees) instead of over the keys (names).
    '''
    def __init__(self, owner):
        self._owner = owner  # the section these children belong to
        self._counter = 0  # used to assign an automatic numeric name per child
        self._children = []  # children in insertion order
        self._named_children = {}  # name (given or numeric) -> child
        # child -> the name it was registered under (None if never added)
        self._given_name = defaultdict(lambda: None)
    def __iter__(self):
        # Iterate over the children themselves, not over their names
        return iter(self._children)
    def __len__(self):
        return len(self._children)
    def __contains__(self, item):
        # Membership test is by *name*, not by child object
        return item in self._named_children
    def name(self, child):
        '''
        Return the given name (i.e. not the automatic name such as ``1``) for a
        child subtree.
        Parameters
        ----------
        child : `Morphology`
        Returns
        -------
        name : str
            The given name for the ``child``.
        '''
        return self._given_name[child]
    def __getitem__(self, item):
        # NOTE(review): the error message mentions integers, but only string
        # keys are actually supported here -- verify intended behavior
        if isinstance(item, basestring):
            return self._named_children[item]
        else:
            raise TypeError('Index has to be an integer or a string.')
    def add(self, name, subtree, automatic_name=False):
        '''
        Add a new child to the morphology.
        Parameters
        ----------
        name : str
            The name (e.g. ``"axon"``, ``"soma"``) to use for this sub tree.
        subtree : `Morphology`
            The subtree to link as a child.
        automatic_name : bool, optional
            Whether to chose a new name automatically, if a subtree of the same
            name already exists (uses e.g. ``"dend2"`` instead ``"dend"``).
            Defaults to ``False`` and will raise an error instead.
        '''
        # Re-adding the same subtree under its existing name is a no-op;
        # only a *different* subtree under an existing name is a conflict
        if (name in self._named_children and
                self._named_children[name] is not subtree):
            if automatic_name:
                basename = name
                counter = 1
                while name in self._named_children:
                    counter += 1
                    name = basename + str(counter)
            else:
                raise AttributeError('The name %s is already used for a '
                                     'subtree.' % name)
        if subtree not in self._children:
            self._counter += 1
            self._children.append(subtree)
            # Every child is also reachable under its position number
            # (as a string), e.g. '1' for the first child added
            self._named_children[str(self._counter)] = subtree
            self._given_name[subtree] = name
        if name is not None:
            self._named_children[name] = subtree
        # Link the child back to the section owning this Children object
        subtree._parent = self._owner
    def remove(self, name):
        '''
        Remove a subtree from this morphology.
        Parameters
        ----------
        name : str
            The name of the sub tree to remove.
        '''
        # NOTE(review): this only removes the entry for ``name``; the
        # automatic numeric alias (e.g. '1') keeps pointing at the removed
        # subtree -- confirm whether that is intended
        if name not in self:
            raise AttributeError('The subtree ' + name + ' does not exist')
        subtree = self._named_children[name]
        del self._named_children[name]
        self._children.remove(subtree)
        subtree._parent = None
    def __repr__(self):
        n = len(self._children)
        s = '<%d children' % n
        if n > 0:
            name_dict = {self.name(sec): sec for sec in self._children}
            s += ': %r' % name_dict
        return s + '>'
class Morphology(object):
    '''
    Neuronal morphology (tree structure).
    The data structure is a tree where each node is an un-branched section
    consisting of a number of connected compartments, each one defined by its
    geometrical properties (length, area, diameter, position).
    Notes
    -----
    You cannot create objects of this class, create a `Soma`, a `Section`, or
    a `Cylinder` instead.
    '''
    __metaclass__ = abc.ABCMeta  # abstract base class (Python 2 syntax)
    @check_units(n=1)
    def __init__(self, n, type=None):
        # A common mistake is passing a filename; give a helpful error for it
        if isinstance(n, basestring):
            raise TypeError('Need the number of compartments, not a string. '
                            'If you want to load a morphology from a file, '
                            'use Morphology.from_file instead.')
        self._n = int(n)
        # int() silently truncates floats; reject anything non-integral
        if self._n != n:
            raise TypeError('The number of compartments n has to be an integer '
                            'value.')
        if n <= 0:
            raise ValueError('The number of compartments n has to be at least 1.')
        self.type = type  # e.g. 'soma', 'axon', 'dend' (or None)
        self._children = Children(self)
        self._parent = None
        self.indices = MorphologyIndexWrapper(self)
    def __getitem__(self, item):
        '''
        Return the subtree with the given name/index.
        Ex.: ```neuron['axon']``` or ```neuron['11213']```
        ```neuron[10*um:20*um]``` returns the subbranch from 10 um to 20 um.
        ```neuron[10*um]``` returns one compartment.
        ```neuron[5]``` returns compartment number 5.

        Length- and integer-based indexing returns a `SubMorphology` view on
        this section; string indexing descends into named child subtrees.
        '''
        if isinstance(item, slice):  # neuron[10*um:20*um] or neuron[1:3]
            # A slice must be homogeneous: either both ends are lengths or
            # both are integers
            using_lengths = all([arg is None or have_same_dimensions(arg, meter)
                                 for arg in [item.start, item.stop]])
            using_ints = all([arg is None or int(arg) == float(arg)
                              for arg in [item.start, item.stop]])
            if not (using_lengths or using_ints):
                raise TypeError('Index slice has to use lengths or integers')
            if using_lengths:
                if item.step is not None:
                    raise TypeError(('Cannot provide a step argument when '
                                     'slicing with lengths'))
                l = np.cumsum(np.asarray(self.length))  # coordinate on the section
                # We use a special handling for values very close to the points
                # between the compartments to avoid non-intuitive rounding
                # effects: a point closer than 1e-12*length of section will be
                # considered to be within the following section (for a start
                # index), respectively within the previous section (for an end
                # index)
                if item.start is None:
                    i = 0
                else:
                    diff = np.abs(float(item.start) - l)
                    if min(diff) < 1e-12 * l[-1]:
                        i = np.argmin(diff) + 1
                    else:
                        i = np.searchsorted(l, item.start)
                if item.stop is None:
                    j = len(l)
                else:
                    diff = np.abs(float(item.stop) - l)
                    if min(diff) < 1e-12 * l[-1]:
                        j = np.argmin(diff) + 1
                    else:
                        j = np.searchsorted(l, item.stop) + 1
            else:  # integers
                # slice.indices clamps negative/out-of-range values
                i, j, step = item.indices(self.n)
                if step != 1:
                    raise TypeError('Can only slice a contiguous segment')
        elif isinstance(item, Quantity) and have_same_dimensions(item, meter):
            # A single length: return the one compartment containing it
            l = np.hstack([0, np.cumsum(np.asarray(self.length))])  # coordinate on the section
            if float(item) < 0 or float(item) > (1 + 1e-12) * l[-1]:
                raise IndexError(('Invalid index %s, has to be in the interval '
                                  '[%s, %s].' % (item, 0*meter, l[-1]*meter)))
            diff = np.abs(float(item) - l)
            if min(diff) < 1e-12 * l[-1]:
                i = np.argmin(diff)
            else:
                i = np.searchsorted(l, item) - 1
            j = i + 1
        elif isinstance(item, numbers.Integral):  # int: returns one compartment
            if item < 0:  # allows e.g. to use -1 to get the last compartment
                item += self.n
            if item >= self.n:
                raise IndexError(('Invalid index %d '
                                  'for %d compartments') % (item, self.n))
            i = item
            j = i + 1
        elif isinstance(item, basestring):
            item = str(item)  # convert int to string
            if (len(item) > 1) and all([c in 'LR123456789' for c in
                                        item]):  # binary string of the form LLLRLR or 1213 (or mixed)
                # Descend one level and recurse with the remaining path
                return self._children[item[0]][item[1:]]
            elif item in self._children:
                return self._children[item]
            else:
                raise AttributeError('The subtree ' + item + ' does not exist')
        else:
            raise TypeError('Index of type %s not understood' % type(item))
        return SubMorphology(self, i, j)
    def __setitem__(self, item, child):
        '''
        Inserts the subtree and name it ``item``.
        Ex.: ``neuron['axon']`` or ``neuron['11213']``
        '''
        item = str(item)  # convert int to string
        if (len(item) > 1) and all([c in 'LR123456789' for c in item]):
            # binary string of the form LLLRLR or 1213 (or mixed)
            # Descend one level and delegate the rest of the path
            self.children[item[0]][item[1:]] = child
        else:
            self.children.add(item, child)
    def __delitem__(self, item):
        '''
        Remove the subtree ``item``.
        '''
        item = str(item)  # convert int to string
        if (len(item) > 1) and all([c in 'LR123456789' for c in item]):
            # binary string of the form LLLRLR or 1213 (or mixed)
            # Descend one level and delegate the rest of the path
            del self._children[item[0]][item[1:]]
        else:
            self._children.remove(item)
def __getattr__(self, item):
'''
Return the subtree named ``item``.
Ex.: ``axon = neuron.axon``
'''
if item.startswith('_'):
return super(object, self).__getattr__(item)
else:
return self[item]
    def __setattr__(self, item, child):
        '''
        Attach a subtree and name it ``item``.
        Ex.: ``neuron.axon = Soma(diameter=10*um)``
        '''
        # Only Morphology values with public names are treated as subtrees;
        # everything else is a normal attribute assignment
        if isinstance(child, Morphology) and not item.startswith('_'):
            self[item] = child
        else:  # If it is not a subtree, then it's a normal class attribute
            object.__setattr__(self, item, child)
    def __delattr__(self, item):
        '''
        Remove the subtree ``item``.
        '''
        del self[item]
    def _indices(self, item=None, index_var='_idx'):
        '''
        Return compartment indices for the main section, relative to the
        original morphology.

        With ``item`` given, delegates to the indexed sub-part; without it,
        returns a scalar for single-compartment sections and an index array
        otherwise.
        '''
        if index_var != '_idx':
            raise AssertionError('Unexpected index %s' % index_var)
        if not (item is None or item == slice(None)):
            if isinstance(item, slice):
                # So that this always returns an array of values, even if it is
                # just a single value
                return self[item]._indices(slice(None))
            else:
                return self[item]._indices(None)
        else:
            # Offset of this section within the whole morphology
            start_idx = _calc_start_idx(self)
            if self.n == 1 and item is None:
                return start_idx
            else:
                return np.arange(start_idx, start_idx + self.n)
    def topology(self):
        '''
        Return a representation of the topology
        Returns
        -------
        topology : `Topology`
            An object representing the topology (can be converted to a string
            by using ``str(...)`` or simply by printing it with `print`.)
        '''
        return Topology(self)
    def generate_coordinates(self,
                             section_randomness=0.0,
                             compartment_randomness=0.0,
                             overwrite_existing=False):
        r'''
        Create a new `Morphology`, with coordinates filled in place where the
        previous morphology did not have any. This is mostly useful for
        plotting a morphology, it does not affect its electrical properties.
        Parameters
        ----------
        section_randomness : float, optional
            The randomness when deciding the direction vector for each new
            section. The given number is the :math:`\beta` parameter of an
            exponential distribution (in degrees) which will be used to
            determine the deviation from the direction of the parent section.
            If the given value equals 0 (the default), then a deterministic
            algorithm will be used instead.
        compartment_randomness : float, optional
            The randomness when deciding the direction vector for each
            compartment within a section. The given number is the :math:`\beta`
            parameter of an exponential distribution (in degrees) which will be
            used to determine the deviation from the main direction of the
            current section. If the given value equals 0 (the default), then all
            compartments will be along a straight line.
        overwrite_existing : bool, optional
            Whether to overwrite existing coordinates in the morphology. This
            is by default set to ``False``, meaning that only sections that do
            not currently have any coordinates set will get new coordinates.
            This allows to conveniently generate a morphology that can be
            plotted for a morphology that is based on points but also has
            artificially added sections (the most common case: an axon added
            to a reconstructed morphology). If set to ``True``, all sections
            will get new coordinates. This can be useful to either get a
            schematic representation of the morphology (with
            ``section_randomness`` and ``compartment_randomness`` both 0) or to
            simply generate a new random variation of a morphology (which will
            still be electrically equivalent, of course).
        Returns
        -------
        morpho_with_coordinates : `Morphology`
            The same morphology, but with coordinates
        '''
        # Convert to radians (the public API takes degrees, the recursive
        # helper works in radians); only the local copies are modified
        section_randomness *= np.pi/180
        compartment_randomness *= np.pi/180
        return _add_coordinates(self, section_randomness=section_randomness,
                                compartment_randomness=compartment_randomness,
                                overwrite_existing=overwrite_existing)
    @abstractmethod
    def copy_section(self):
        '''
        Create a copy of the current section (attributes of this section only,
        not re-creating the parent/children relation)
        Returns
        -------
        copy : `Morphology`
            A copy of this section (without the links to the parent/children)
        '''
        raise NotImplementedError()
    @property
    def n(self):
        '''
        The number of compartments in this section.
        '''
        return self._n
    def __len__(self):
        '''
        This is not well-defined, use `Morphology.n` or
        `Morphology.total_compartments` instead.
        '''
        # Deliberately raises instead of guessing which count the caller meant
        raise TypeError('The "length" of a Morphology is ambiguous, use its '
                        '"n" attribute for the number of compartments in this '
                        'section or the "total_compartments" attribute for the '
                        'total number of compartments in the whole sub-tree.')
    @property
    def total_compartments(self):
        '''
        The total number of compartments in this subtree (i.e. the number of
        compartments in this section plus all the compartments in the sections
        deeper in the tree).
        '''
        return self.n + sum(c.total_compartments for c in self.children)
    @property
    def total_sections(self):
        '''
        The total number of sections in this subtree.
        '''
        return 1 + sum(c.total_sections for c in self.children)
    @property
    def parent(self):
        '''
        The parent section of this section.
        '''
        return self._parent
    @property
    def children(self):
        '''
        The children (as a `Children` object) of this section.
        '''
        return self._children
    @abc.abstractproperty
    def end_distance(self):
        '''
        The distance to the root of the morphology at the end of this section.
        '''
        raise NotImplementedError()
    # Per-compartment attributes (arrays of length n, implemented by the
    # concrete subclasses Soma/Section/Cylinder)
    @abc.abstractproperty
    def area(self):
        '''
        The membrane surface area of each compartment in this section.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def volume(self):
        '''
        The volume of each compartment in this section.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def length(self):
        '''
        The length of each compartment in this section.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def r_length_1(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        start and the midpoint of each compartment. Dividing this value by the
        Intracellular resistivity gives the conductance.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def r_length_2(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        midpoint and the end of each compartment. Dividing this value by the
        Intracellular resistivity gives the conductance.
        '''
        raise NotImplementedError()
    # At-midpoint attributes
    @abc.abstractproperty
    def diameter(self):
        '''
        The diameter at the middle of each compartment in this section.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def distance(self):
        '''
        The total distance between the midpoint of each compartment and the root
        of the morphology.
        '''
        raise NotImplementedError()
    # Coordinates at the *start* of each compartment. The unitless ``*_``
    # variants are abstract; the unit-attaching wrappers are defined here.
    @property
    def start_x(self):
        '''
        The x coordinate at the beginning of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.start_x_)
    @property
    def start_y(self):
        '''
        The y coordinate at the beginning of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.start_y_)
    @property
    def start_z(self):
        '''
        The z coordinate at the beginning of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.start_z_)
    @abc.abstractproperty
    def start_x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the beginning
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def start_y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the beginning
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def start_z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the beginning
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        raise NotImplementedError()
    # Coordinates at the *midpoint* of each compartment
    @property
    def x(self):
        '''
        The x coordinate at the midpoint of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.x_)
    @property
    def y(self):
        '''
        The y coordinate at the midpoint of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.y_)
    @property
    def z(self):
        '''
        The z coordinate at the midpoint of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.z_)
    @abc.abstractproperty
    def x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the midpoint
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the midpoint
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the midpoint
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        raise NotImplementedError()
    # Coordinates at the *end* of each compartment
    @property
    def end_x(self):
        '''
        The x coordinate at the end of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.end_x_)
    @property
    def end_y(self):
        '''
        The y coordinate at the end of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.end_y_)
    @property
    def end_z(self):
        '''
        The z coordinate at the end of each compartment. Returns ``None``
        for morphologies without coordinates.
        '''
        return _to_meters(self.end_z_)
    @abc.abstractproperty
    def end_x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the end of
        each compartment. Returns ``None`` for morphologies without coordinates.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def end_y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the end of
        each compartment. Returns ``None`` for morphologies without coordinates.
        '''
        raise NotImplementedError()
    @abc.abstractproperty
    def end_z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the end of
        each compartment. Returns ``None`` for morphologies without coordinates.
        '''
        raise NotImplementedError()
    @property
    def coordinates(self):
        r'''
        Array with all coordinates at the start- and end-points of each
        compartment in this section. The array has size :math:`(n+1) \times 3`,
        where :math:`n` is the number of compartments in this section. Each
        row is one point (start point of first compartment, end point of first
        compartment, end point of second compartment, ...), with the columns
        being the x, y, and z coordinates. Returns ``None`` for morphologies
        without coordinates.
        '''
        if self.x_ is None:
            return None
        else:
            return Quantity(self.coordinates_, dim=meter.dim)
    @property
    def coordinates_(self):
        r'''
        Array with all coordinates (as unitless floating point numbers) at the
        start- and end-points of each compartment in this section. The array has
        size :math:`(n+1) \times 3`, where :math:`n` is the number of
        compartments in this section. Each row is one point (start point of
        first compartment, end point of first compartment, end point of second
        compartment, ...), with the columns being the x, y, and z coordinates.
        Returns ``None`` for morphologies without coordinates.
        '''
        if self.x_ is None:
            return None
        else:
            # First row is the section start; remaining rows are the
            # compartment end points (compartments are contiguous)
            return np.vstack([np.hstack([self.start_x_[0], self.end_x_[:]]),
                              np.hstack([self.start_y_[0], self.end_y_[:]]),
                              np.hstack([self.start_z_[0], self.end_z_[:]])]).T
    @staticmethod
    def _create_section(compartments, name, parent, sections,
                        spherical_soma):
        '''
        Build a `Soma` or `Section` from a run of unbranched SWC-style
        compartments and attach it to its parent in ``sections``.
        '''
        if (spherical_soma and
                len(compartments) == 1 and
                compartments[0].comp_name == 'soma'):
            # A single soma point becomes a spherical Soma
            soma = compartments[0]
            section = Soma(diameter=soma.diameter * um,
                           x=soma.x * um, y=soma.y * um, z=soma.z * um)
        else:
            sec_x, sec_y, sec_z, sec_diameter = zip(*[(c.x, c.y, c.z,
                                                       c.diameter)
                                                      for c in compartments])
            # Add a point for the end of the parent compartment
            if parent is not None:
                n = len(compartments)
                if (parent.comp_name is not None and
                        parent.comp_name.lower() == 'soma'):
                    # For a Soma, we don't use its diameter
                    start_diameter = sec_diameter[0]
                else:
                    start_diameter = parent.diameter
                # Use relative coordinates
                sec_x = np.array(sec_x) - parent.x
                sec_y = np.array(sec_y) - parent.y
                sec_z = np.array(sec_z) - parent.z
                start_x = start_y = start_z = 0.
            else:
                # No parent: the first point becomes the section start,
                # leaving n-1 compartments
                n = len(compartments) - 1
                start_diameter = sec_diameter[0]
                sec_diameter = sec_diameter[1:]
                start_x = sec_x[0]
                start_y = sec_y[0]
                start_z = sec_z[0]
                sec_x = sec_x[1:]
                sec_y = sec_y[1:]
                sec_z = sec_z[1:]
            diameter = np.hstack([start_diameter, sec_diameter])*um
            x = np.hstack([start_x, sec_x])*um
            y = np.hstack([start_y, sec_y])*um
            z = np.hstack([start_z, sec_z])*um
            section = Section(n=n, diameter=diameter, x=x, y=y, z=z,
                              type=name)
        # Add the section as a child to its parent
        if parent is not None:
            parent_sec = sections[parent.index]
            parent_sec.children.add(name, section, automatic_name=True)
        return section
    @staticmethod
    def _compartments_to_sections(compartment, spherical_soma,
                                  current_compartments=None, sections=None):
        # Merge all unbranched compartments of the same type into a single
        # section
        # ``sections`` maps the index of a run's last compartment to the
        # created section; an OrderedDict keeps parents before children.
        if sections is None:
            sections = OrderedDict()
        if current_compartments is None:
            current_compartments = []
        current_compartments.append(compartment)
        # We have to create a new section, if we are either
        # 1. at a leaf of the tree or at a branching point, or
        # 2. if the compartment type changes
        if (len(compartment.children) != 1 or
                compartment.comp_name != compartment.children[0].comp_name):
            parent = current_compartments[0].parent
            section = Morphology._create_section(current_compartments,
                                                 compartment.comp_name,
                                                 parent=parent,
                                                 sections=sections,
                                                 spherical_soma=spherical_soma)
            sections[current_compartments[-1].index] = section
            # If we are at a branching point, recurse into all subtrees
            for child in compartment.children:
                Morphology._compartments_to_sections(child,
                                                     spherical_soma=spherical_soma,
                                                     current_compartments=None,
                                                     sections=sections)
        else:
            # A single child of the same type, continue (recursive call)
            Morphology._compartments_to_sections(compartment.children[0],
                                                 spherical_soma=spherical_soma,
                                                 current_compartments=current_compartments,
                                                 sections=sections)
        return sections
    @staticmethod
    def _replace_three_point_soma(compartment, all_compartments):
        # Replace a three-point/two-cylinder soma by a single spherical soma
        # if possible (see http://neuromorpho.org/SomaFormat.html for some
        # details)
        # We are looking for a node with two children of the soma type (and
        # other children of other types), where the two children don't have any
        # children of their own
        soma_children = [c for c in compartment.children
                         if c.comp_name == 'soma']
        if (compartment.comp_name == 'soma' and len(soma_children) == 2 and
                all(len(c.children) == 0 for c in soma_children)):
            # We've found a 3-point soma to replace
            soma_c = [compartment] + soma_children
            # All three points have to share the same diameter
            if not all(abs(c.diameter - soma_c[0].diameter) < 1e-15
                       for c in soma_c):
                indices = ', '.join(str(c.index) for c in soma_c)
                raise ValueError('Found a "3-point-soma" (lines: %s), but not '
                                 'all the diameters are '
                                 'identical.' % indices)
            diameter = soma_c[0].diameter
            point_0 = np.array([soma_c[0].x, soma_c[0].y, soma_c[0].z])
            point_1 = np.array([soma_c[1].x, soma_c[1].y, soma_c[1].z])
            point_2 = np.array([soma_c[2].x, soma_c[2].y, soma_c[2].z])
            length_1 = np.sqrt(np.sum((point_1 - point_0) ** 2))
            length_2 = np.sqrt(np.sum((point_2 - point_0) ** 2))
            # The convention requires the two extra points to sit exactly one
            # radius away from the center (0.01 um tolerance)
            if (np.abs(length_1 - diameter / 2) > 0.01 or
                    np.abs(length_2 - diameter / 2) > 0.01):
                raise ValueError(('Cannot replace "3-point-soma" by a single '
                                  'point, the second and third points should '
                                  'be positioned one radius away from the '
                                  'first point. Distances are %.3fum and '
                                  '%.3fum, respectively, while the '
                                  'radius is %.3fum.') % (length_1,
                                                          length_2,
                                                          diameter / 2))
            children = [c for c in compartment.children
                        if not c in soma_c]
            # Rebuild the node without the two auxiliary soma points
            compartment = Node(index=compartment.index, comp_name='soma',
                               x=point_0[0], y=point_0[1], z=point_0[2],
                               diameter=diameter, parent=compartment.parent,
                               children=children)
            all_compartments[compartment.index] = compartment
            del all_compartments[soma_children[0].index]
            del all_compartments[soma_children[1].index]
        # Recurse further down the tree
        all_compartments[compartment.index] = compartment
        for child in compartment.children:
            Morphology._replace_three_point_soma(child,
                                                 all_compartments)
    @staticmethod
    def from_points(points, spherical_soma=True):
        '''
        Create a morphology from a sequence of points (similar to the ``SWC``
        format, see `Morphology.from_swc_file`). Each point has to be
        a 7-tuple: ``(index, name, x, y, z, diameter, parent)``
        Note that the values should not use units, but are instead all taken
        to be in micrometers.
        Parameters
        ----------
        points : sequence of 7-tuples
            The points of the morphology.
        spherical_soma : bool, optional
            Whether to model a soma as a sphere.
        Returns
        -------
        morphology : `Morphology`
        Notes
        -----
        This format closely follows the SWC format (see
        `Morphology.from_swc_file`) with two differences: the ``type`` should
        be a string (e.g. ``'soma'``) instead of an integer and the 6-th element
        should be the diameter and not the radius.
        '''
        # First pass through all points to get the dependency structure
        compartments = OrderedDict()
        for counter, point in enumerate(points):
            if len(point) != 7:
                raise ValueError('Each point needs to be described by 7 '
                                 'values, got %d instead.' % len(point))
            index, name, x, y, z, diameter, parent_idx = point
            if index in compartments:
                raise ValueError('Two compartments with index %d' % index)
            if parent_idx == index:
                raise ValueError('Compartment %d lists itself as the parent '
                                 'compartment.' % index)
            # Only the very first point may declare itself parentless (-1);
            # a -1 parent on any later point falls through to the error below
            if counter == 0 and parent_idx == -1:
                parent = None  # The first compartment does not have a parent
            elif parent_idx not in compartments:
                raise ValueError(('Did not find the compartment %d (parent '
                                  'compartment of compartment %d). Make sure '
                                  'that parent compartments are listed before '
                                  'their children.') % (parent_idx, index))
            else:
                parent = compartments[parent_idx]
            children = []
            node = Node(index, name, x, y, z, diameter, parent, children)
            compartments[index] = node
            if parent is not None:
                parent.children.append(node)
        # NOTE(review): ``.values()[0]`` and ``itervalues`` below are
        # Python-2-only constructs; this method will fail on Python 3 as-is
        if spherical_soma:
            Morphology._replace_three_point_soma(compartments.values()[0],
                                                 compartments)
        sections = Morphology._compartments_to_sections(compartments.values()[0],
                                                        spherical_soma)
        # Go through all the sections again and add standard names for all
        # sections (potentially in addition to the name they already have):
        # "L" + "R" for one or two children, "1", "2", "3", etc. otherwise
        children_counter = defaultdict(int)
        for section in sections.itervalues():
            parent = section.parent
            if parent is not None:
                children_counter[parent] += 1
                children = parent.children
                nth_child = children_counter[parent]
                if len(children) <= 2:
                    name = 'L' if nth_child == 1 else 'R'
                else:
                    name = '%d' % nth_child
                children.add(name, section)
        # There should only be one section without parents
        root = [sec for sec in sections.itervalues() if sec.parent is None]
        assert len(root) == 1
        return root[0]
    @staticmethod
    def from_swc_file(filename, spherical_soma=True):
        '''
        Load a morphology from a ``SWC`` file. A large database of morphologies
        in this format can be found at http://neuromorpho.org
        The format consists of an optional header of lines starting with ``#``
        (ignored), followed by a sequence of points, each described in a line
        following the format::
            index type x y z radius parent
        ``index`` is an integer label (starting at 1) that identifies the
        current point and increases by one each line. ``type`` is an integer
        representing the type of the neural segment. The only type that changes
        the interpretation by Brian is the type ``1`` which signals a soma.
        Types ``2`` (axon), ``3`` (dendrite), and ``4`` (apical dendrite) are
        used to give corresponding names to the respective sections. All other
        types are ignored. ``x``, ``y``, and ``z`` are the cartesian coordinates
        at each point and ``r`` is its radius. ``parent`` refers to the index
        of the parent point or is ``-1`` for the root point.
        Parameters
        ----------
        filename : str
            The name of the ``SWC`` file.
        spherical_soma : bool, optional
            Whether to model the soma as a sphere.
        Returns
        -------
        morpho : `Morphology`
            The morphology stored in the given file.
        '''
        # Unknown SWC types map to None (i.e. the section gets no type name)
        swc_types = defaultdict(lambda: None)
        # The following names will be translated into names, all other will be
        # ignored
        swc_types.update({'1': 'soma', '2': 'axon', '3': 'dend', '4': 'apic'})
        with open(filename, 'r') as f:
            points = []
            for line_no, line in enumerate(f):
                line = line.strip()
                if line.startswith('#') or len(line) == 0:
                    # Ignore comments or empty lines
                    continue
                splitted = line.split()
                if len(splitted) != 7:
                    raise ValueError('Each line of an SWC file has to contain '
                                     '7 space-separated entries, but line %d '
                                     'contains %d.' % (line_no + 1,
                                                       len(splitted)))
                index, comp_type, x, y, z, radius, parent = splitted
                # from_points expects a *diameter*, SWC stores the radius
                points.append((int(index),
                               swc_types[comp_type],
                               float(x),
                               float(y),
                               float(z),
                               2*float(radius),
                               int(parent)))
        return Morphology.from_points(points, spherical_soma=spherical_soma)
@staticmethod
def from_file(filename, spherical_soma=True):
'''
Convencience method to load a morphology from a given file. At the
moment, only ``SWC`` files are supported, calling this function is
therefore equivalent to calling `Morphology.from_swc_file` directly.
Parameters
----------
filename : str
The name of a file storing a morphology.
spherical_soma : bool, optional
Whether to model the soma as a sphere.
Returns
-------
morphology : `Morphology`
The morphology stored in the given file.
'''
_, ext = os.path.splitext(filename)
if ext.lower() == '.swc':
return Morphology.from_swc_file(filename,
spherical_soma=spherical_soma)
else:
raise NotImplementedError('Currently, SWC is the only supported '
'file format.')
class SubMorphology(object):
    '''
    A view on a contiguous subset of the compartments of a single section
    in a morphology.
    '''
    def __init__(self, morphology, i, j):
        self._morphology = morphology
        self.indices = MorphologyIndexWrapper(self)
        self._i = i
        self._j = j

    def _sub(self, values):
        # Restrict a per-compartment attribute of the full section to the
        # compartments covered by this view
        return values[self._i:self._j]

    def _coords(self, values):
        # Same idea as _sub, but delegates to the module-level helper which
        # also deals with morphologies that do not store coordinates
        return _from_morphology(values, self._i, self._j)

    def _indices(self, item=None):
        if not (item is None or item == slice(None)):
            raise IndexError('Cannot index a view on a subset of a section further')
        # Absolute index of the first compartment of the enclosing section
        start_idx = _calc_start_idx(self._morphology)
        if item is None and self.n == 1:
            return start_idx + self._i
        return np.arange(start_idx + self._i, start_idx + self._j)

    @property
    def n(self):
        '''
        The number of compartments in this sub-section.
        '''
        return self._j - self._i

    def __len__(self):
        return self.n

    @property
    def n_sections(self):
        '''
        The number of sections in this sub-section (always 1).
        '''
        return 1

    # Per-compartment attributes (sliced out of the enclosing section)

    @property
    def area(self):
        '''
        The membrane surface area of each compartment in this sub-section.
        '''
        return self._sub(self._morphology.area)

    @property
    def volume(self):
        '''
        The volume of each compartment in this sub-section.
        '''
        return self._sub(self._morphology.volume)

    @property
    def length(self):
        '''
        The length of each compartment in this sub-section.
        '''
        return self._sub(self._morphology.length)

    @property
    def r_length_1(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        start and the midpoint of each compartment in this sub-section.
        Dividing this value by the intracellular resistivity gives the
        conductance.
        '''
        return self._sub(self._morphology.r_length_1)

    @property
    def r_length_2(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        midpoint and the end of each compartment in this sub-section.
        Dividing this value by the intracellular resistivity gives the
        conductance.
        '''
        return self._sub(self._morphology.r_length_2)

    # At-midpoint attributes

    @property
    def diameter(self):
        '''
        The diameter at the middle of each compartment in this sub-section.
        '''
        return self._sub(self._morphology.diameter)

    @property
    def distance(self):
        '''
        The total distance between the midpoint of each compartment in this
        sub-section and the root of the morphology.
        '''
        return self._sub(self._morphology.distance)

    @property
    def start_x(self):
        '''
        The x coordinate at the beginning of each compartment in this
        sub-section. Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.start_x_)

    @property
    def start_y(self):
        '''
        The y coordinate at the beginning of each compartment in this
        sub-section. Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.start_y_)

    @property
    def start_z(self):
        '''
        The z coordinate at the beginning of each compartment in this
        sub-section. Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.start_z_)

    @property
    def start_x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the
        beginning of each compartment in this sub-section. Returns ``None``
        for morphologies without coordinates.
        '''
        return self._coords(self._morphology.start_x_)

    @property
    def start_y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the
        beginning of each compartment in this sub-section. Returns ``None``
        for morphologies without coordinates.
        '''
        return self._coords(self._morphology.start_y_)

    @property
    def start_z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the
        beginning of each compartment in this sub-section. Returns ``None``
        for morphologies without coordinates.
        '''
        return self._coords(self._morphology.start_z_)

    @property
    def x(self):
        '''
        The x coordinate at the midpoint of each compartment in this
        sub-section. Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.x_)

    @property
    def y(self):
        '''
        The y coordinate at the midpoint of each compartment in this
        sub-section. Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.y_)

    @property
    def z(self):
        '''
        The z coordinate at the midpoint of each compartment in this
        sub-section. Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.z_)

    @property
    def x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the midpoint
        of each compartment in this sub-section. Returns ``None`` for
        morphologies without coordinates.
        '''
        return self._coords(self._morphology.x_)

    @property
    def y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the midpoint
        of each compartment in this sub-section. Returns ``None`` for
        morphologies without coordinates.
        '''
        return self._coords(self._morphology.y_)

    @property
    def z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the midpoint
        of each compartment in this sub-section. Returns ``None`` for
        morphologies without coordinates.
        '''
        return self._coords(self._morphology.z_)

    @property
    def end_x(self):
        '''
        The x coordinate at the end of each compartment in this sub-section.
        Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.end_x_)

    @property
    def end_y(self):
        '''
        The y coordinate at the end of each compartment in this sub-section.
        Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.end_y_)

    @property
    def end_z(self):
        '''
        The z coordinate at the end of each compartment in this sub-section.
        Returns ``None`` for morphologies without coordinates.
        '''
        return _to_meters(self.end_z_)

    @property
    def end_x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the end of
        each compartment in this sub-section. Returns ``None`` for
        morphologies without coordinates.
        '''
        return self._coords(self._morphology.end_x_)

    @property
    def end_y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the end of
        each compartment in this sub-section. Returns ``None`` for
        morphologies without coordinates.
        '''
        return self._coords(self._morphology.end_y_)

    @property
    def end_z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the end of
        each compartment in this sub-section. Returns ``None`` for
        morphologies without coordinates.
        '''
        return self._coords(self._morphology.end_z_)
class Soma(Morphology):
    '''
    A spherical, iso-potential soma.

    Parameters
    ----------
    diameter : `Quantity`
        Diameter of the sphere.
    x : `Quantity`, optional
        The x coordinate of the position of the soma.
    y : `Quantity`, optional
        The y coordinate of the position of the soma.
    z : `Quantity`, optional
        The z coordinate of the position of the soma.
    type : str, optional
        The ``type`` of this section, defaults to ``'soma'``.
    '''
    @check_units(diameter=meter, x=meter, y=meter, z=meter)
    def __init__(self, diameter, x=None, y=None, z=None, type='soma'):
        Morphology.__init__(self, n=1, type=type)
        if diameter.shape != () and len(diameter) != 1:
            raise TypeError('Diameter has to be a scalar value.')
        for coordinate in (x, y, z):
            if (coordinate is not None and coordinate.shape != ()
                    and len(coordinate) != 1):
                raise TypeError('Coordinates have to be scalar values.')
        # Stored with units, as a length-1 array
        self._diameter = np.ones(1) * diameter
        # Coordinates are stored as unitless length-1 arrays. If at least one
        # coordinate was given, the unspecified ones default to 0; if none
        # was given, they all stay None.
        if x is None and y is None and z is None:
            default_value = None
        else:
            default_value = np.array([0.0])
        self._x = np.atleast_1d(np.asarray(x)) if x is not None else default_value
        self._y = np.atleast_1d(np.asarray(y)) if y is not None else default_value
        self._z = np.atleast_1d(np.asarray(z)) if z is not None else default_value

    def __repr__(self):
        description = '{klass}(diameter={diam!r}'.format(
            klass=self.__class__.__name__, diam=self.diameter[0])
        if self._x is not None:
            description += ', x={x!r}, y={y!r}, z={z!r}'.format(
                x=self.x[0], y=self.y[0], z=self.z[0])
        # Only mention the type when it deviates from the default
        if self.type != 'soma':
            description += ', type={type!r}'.format(type=self.type)
        return description + ')'

    def copy_section(self):
        return Soma(self.diameter, x=self.x, y=self.y, z=self.z,
                    type=self.type)

    # The per-compartment properties below always return 1D arrays, i.e. for
    # the soma they are arrays of length 1 instead of scalar values

    @property
    def area(self):
        '''
        The membrane surface area of this section (as an array of length 1).
        '''
        # Surface of a sphere with the given diameter
        return np.pi * self.diameter ** 2

    @property
    def volume(self):
        '''
        The volume of this section (as an array of length 1).
        '''
        # Volume of a sphere with the given diameter
        return (np.pi * self.diameter ** 3)/6

    @property
    def length(self):
        '''
        The "length" (equal to `diameter`) of this section (as an array of
        length 1).
        '''
        return self.diameter

    @property
    def r_length_1(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        start and the midpoint of each compartment. Returns a fixed (high)
        value for a `Soma`, corresponding to a section with very low
        intracellular resistance.
        '''
        return [1]*meter

    @property
    def r_length_2(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        midpoint and the end of each compartment. Returns a fixed (high)
        value for a `Soma`, corresponding to a section with very low
        intracellular resistance.
        '''
        return [1]*meter

    @property
    def diameter(self):
        '''
        The diameter of this section (as an array of length 1).
        '''
        return self._diameter

    @property
    def distance(self):
        '''
        The total distance between the midpoint of this section and the root
        of the morphology. The `Soma` is most likely the root of the
        morphology, and therefore the `distance` is 0.
        '''
        if self._parent is None:
            return [0]*um
        return self._parent.distance[-1:]

    # A Soma is modelled as a "point" with the finite surface/volume of a
    # sphere of the given diameter. Its start-, midpoint-, and end-point
    # coordinates are therefore all identical.

    @property
    def start_x_(self):
        '''
        The x-coordinate of the current section (as a unitless array of
        length 1); identical to `x_` and `end_x_` (see class-level comment).
        '''
        return self._x

    @property
    def start_y_(self):
        '''
        The y-coordinate of the current section (as a unitless array of
        length 1); identical to `y_` and `end_y_` (see class-level comment).
        '''
        return self._y

    @property
    def start_z_(self):
        '''
        The z-coordinate of the current section (as a unitless array of
        length 1); identical to `z_` and `end_z_` (see class-level comment).
        '''
        return self._z

    @property
    def x_(self):
        '''
        The x-coordinate of the current section (as a unitless array of
        length 1); identical to `start_x_` and `end_x_` (see class-level
        comment).
        '''
        return self._x

    @property
    def y_(self):
        '''
        The y-coordinate of the current section (as a unitless array of
        length 1); identical to `start_y_` and `end_y_` (see class-level
        comment).
        '''
        return self._y

    @property
    def z_(self):
        '''
        The z-coordinate of the current section (as a unitless array of
        length 1); identical to `start_z_` and `end_z_` (see class-level
        comment).
        '''
        return self._z

    @property
    def end_x_(self):
        '''
        The x-coordinate of the current section (as a unitless array of
        length 1); identical to `start_x_` and `x_` (see class-level comment).
        '''
        return self._x

    @property
    def end_y_(self):
        '''
        The y-coordinate of the current section (as a unitless array of
        length 1); identical to `start_y_` and `y_` (see class-level comment).
        '''
        return self._y

    @property
    def end_z_(self):
        '''
        The z-coordinate of the current section (as a unitless array of
        length 1); identical to `start_z_` and `z_` (see class-level comment).
        '''
        return self._z

    @property
    def end_distance(self):
        '''
        The distance to the root of the morphology at the end of this section.
        Note that since a `Soma` is modeled as a point (see docs of `x`, etc.),
        it does not add anything to the total distance, e.g. a section
        connecting to a `Soma` has a `distance` of 0 um at its start.
        '''
        if self._parent is None:
            return 0 * um
        return self._parent.end_distance
class Section(Morphology):
    '''
    A section (unbranched structure), described as a sequence of truncated
    cones with potentially varying diameters and lengths per compartment.

    Parameters
    ----------
    diameter : `Quantity`
        Either a single value (the constant diameter along the whole section),
        or a value of length ``n+1``. When ``n+1`` values are given, they
        will be interpreted as the diameters at the start of the first
        compartment and the diameters at the end of each compartment (which is
        equivalent to: the diameter at the start of each compartment and the
        diameter at the end of the last compartment).
    n : int, optional
        The number of compartments in this section. Defaults to 1.
    length : `Quantity`, optional
        Either a single value (the total length of the section), or a value of
        length ``n``, the length of each individual compartment. Cannot be
        combined with the specification of coordinates.
    x : `Quantity`, optional
        ``n+1`` values, specifying the x coordinates of the start point of the
        first compartment and the end-points of all compartments (which is
        equivalent to: the start point of all compartments and the end point of
        the last compartment). The coordinates are interpreted as relative to
        the end point of the parent compartment (if any), so in most cases the
        start point should be ``0*um``. The common exception is a cylinder
        connecting to a `Soma`, here the start point can be used to make the
        cylinder start at the surface of the sphere instead of at its center.
        You can specify all of ``x``, ``y``, or ``z`` to specify
        a morphology in 3D, or only one or two out of them to specify a
        morphology in 1D or 2D.
    y : `Quantity`, optional
        See ``x``
    z : `Quantity`, optional
        See ``x``
    type : str, optional
        The type (e.g. ``"axon"``) of this `Section`.
    '''
    @check_units(n=1, length=meter, diameter=meter, start_diameter=meter,
                 x=meter, y=meter, z=meter)
    def __init__(self, diameter, n=1, length=None, x=None, y=None, z=None,
                 start_diameter=None, origin=None, type=None):
        # NOTE(review): start_diameter and origin are accepted but never used
        # below -- presumably kept for backward compatibility; confirm.
        n = int(n)
        Morphology.__init__(self, n=n, type=type)
        # Diameters are stored with units, one value per compartment boundary
        # (n+1 values: start of the first compartment + end of each)
        if diameter.ndim != 1 or len(diameter) != n+1:
            raise TypeError('The diameter argument has to be a one-dimensional '
                            'array of length %d' % (n + 1))
        self._diameter = Quantity(diameter, copy=True).reshape((n+1, ))
        # length and coordinates are mutually exclusive ways of specifying
        # the geometry
        if ((x is not None or y is not None or z is not None) and
                length is not None):
            raise TypeError('Cannot specify coordinates and length at the same '
                            'time.')
        if length is not None:
            # Length
            if length.ndim != 1 or len(length) != n:
                raise TypeError('The length argument has to be a '
                                'one-dimensional array of length %d' % n)
            self._length = Quantity(length, copy=True).reshape((n, ))
            self._x = self._y = self._z = None
        else:
            # Coordinates
            if x is None and y is None and z is None:
                raise TypeError('No length specified, need to specify at least '
                                'one out of x, y, or z.')
            for name, value in [('x', x), ('y', y), ('z', z)]:
                if value is not None and (value.ndim != 1 or len(value) != n + 1):
                    raise TypeError(('%s needs to be a 1-dimensional array '
                                     'of length %d.') % (name, n + 1))
            # Coordinates are stored as unitless arrays; the check_units
            # decorator guarantees that the given values were in meters.
            # Unspecified dimensions default to 0.
            self._x = np.asarray(x).reshape((n+1, )) if x is not None else np.zeros(n + 1)
            self._y = np.asarray(y).reshape((n+1, )) if y is not None else np.zeros(n + 1)
            self._z = np.asarray(z).reshape((n+1, )) if z is not None else np.zeros(n + 1)
            # With coordinates given, the per-compartment lengths follow from
            # the Euclidean distance between consecutive points
            length = np.sqrt((self.end_x - self.start_x) ** 2 +
                             (self.end_y - self.start_y) ** 2 +
                             (self.end_z - self.start_z) ** 2)
            self._length = length

    def __repr__(self):
        # Tolerance-based check for a constant diameter (avoids exact float
        # comparison)
        if all(np.abs(self.end_diameter - self.end_diameter[0]) < self.end_diameter[0]*1e-12):
            # Constant diameter
            diam = self.end_diameter[0]
        else:
            # Reconstruct the original n+1 diameter values
            diam = np.hstack([np.asarray(self.start_diameter[0]),
                              np.asarray(self.end_diameter)])*meter
        s = '{klass}(diameter={diam!r}'.format(klass=self.__class__.__name__,
                                               diam=diam)
        if self.n != 1:
            s += ', n={n}'.format(n=self.n)
        if self._x is not None:
            s += ', x={x!r}, y={y!r}, z={z!r}'.format(x=self._x,
                                                      y=self._y,
                                                      z=self._z)
        else:
            s += ', length={length!r}'.format(length=sum(self._length))
        if self.type is not None:
            s += ', type={type!r}'.format(type=self.type)
        return s + ')'

    def copy_section(self):
        '''
        Create a copy of this section (without parent or children).
        '''
        if self.x is None:
            x, y, z = None, None, None
            length = self.length
        else:
            # The stored coordinates are unitless values in meters; reattach
            # units for the constructor's unit check
            x, y, z = self._x*meter, self._y*meter, self._z*meter
            length = None
        return Section(diameter=self._diameter, n=self.n, x=x, y=y, z=z,
                       length=length, type=self.type)

    @property
    def area(self):
        r'''
        The membrane surface area of each compartment in this section. The
        surface area of each compartment is calculated as
        :math:`\frac{\pi}{2}(d_1 + d_2)\sqrt{\frac{(d_1 - d_2)^2}{4} + l^2}`,
        where :math:`l` is the length of the compartment, and :math:`d_1` and
        :math:`d_2` are the diameter at the start and end of the compartment,
        respectively. Note that this surface area does not contain the area of
        the two disks at the two sides of the truncated cone.
        '''
        d_1 = self.start_diameter
        d_2 = self.end_diameter
        # Lateral surface of a truncated cone (frustum)
        return np.pi/2*(d_1 + d_2)*np.sqrt(((d_1 - d_2)**2)/4 + self._length**2)

    @property
    def volume(self):
        r'''
        The volume of each compartment in this section. The volume of each
        compartment is calculated as
        :math:`\frac{\pi}{12} l (d_1^2 + d_1 d_2 + d_2^2)`,
        where :math:`l` is the length of the compartment, and :math:`d_1` and
        :math:`d_2` are the diameter at the start and end of the compartment,
        respectively.
        '''
        d_1 = self.start_diameter
        d_2 = self.end_diameter
        # Volume of a truncated cone
        return np.pi * self._length * (d_1**2 + d_1*d_2 + d_2**2)/12

    @property
    def length(self):
        '''
        The length of each compartment in this section.
        '''
        return self._length

    @property
    def start_diameter(self):
        '''
        The diameter at the start of each compartment in this section.
        '''
        # Return a copy so that callers cannot modify the internal state
        return Quantity(self._diameter[:-1], copy=True)

    @property
    def end_diameter(self):
        '''
        The diameter at the end of each compartment in this section.
        '''
        # Return a copy so that callers cannot modify the internal state
        return Quantity(self._diameter[1:], copy=True)

    @property
    def diameter(self):
        '''
        The diameter at the middle of each compartment in this section.
        '''
        d_1 = self.start_diameter
        d_2 = self.end_diameter
        # Diameter at the center
        return 0.5*(d_1 + d_2)

    @property
    def distance(self):
        '''
        The total distance between the midpoint of each compartment and the root
        of the morphology.
        '''
        dist = self._parent.end_distance if self._parent is not None else 0 * um
        # Cumulative length up to the end of each compartment, minus half a
        # compartment length, gives the distance to the midpoint
        return dist + np.cumsum(self.length) - 0.5 * self.length

    @property
    def end_distance(self):
        '''
        The distance to the root of the morphology at the end of this section.
        '''
        return self.distance[-1] + 0.5 * self.length[-1]

    @property
    def r_length_1(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        start and the midpoint of each compartment. Dividing this value by the
        Intracellular resistivity gives the conductance.
        '''
        d_1 = self.start_diameter
        # d_2 is the diameter at the compartment midpoint (mean of start and
        # end diameter)
        d_2 = (self.start_diameter + self.end_diameter)*0.5
        return np.pi/2 * (d_1 * d_2)/self._length

    @property
    def r_length_2(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        midpoint and the end of each compartment. Dividing this value by the
        Intracellular resistivity gives the conductance.
        '''
        d_1 = (self.start_diameter + self.end_diameter)*0.5
        d_2 = self.end_diameter
        return np.pi/2 * (d_1 * d_2)/self._length

    @property
    def start_x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the beginning
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        if self._x is None:
            return None
        # Stored coordinates are relative to the end point of the parent
        # section (if any)
        parent_end_x = self.parent.end_x_ if self.parent is not None else None
        if parent_end_x is not None:
            return parent_end_x[-1] + self._x[:-1]
        else:
            return self._x[:-1]

    @property
    def start_y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the beginning
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        if self._y is None:
            return None
        parent_end_y = self.parent.end_y_ if self.parent is not None else None
        if parent_end_y is not None:
            return parent_end_y[-1] + self._y[:-1]
        else:
            return self._y[:-1]

    @property
    def start_z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the beginning
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        if self._z is None:
            return None
        parent_end_z = self.parent.end_z_ if self.parent is not None else None
        if parent_end_z is not None:
            return parent_end_z[-1] + self._z[:-1]
        else:
            return self._z[:-1]

    @property
    def x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the midpoint
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        if self._x is None:
            return None
        start_x = self.start_x_
        diff_x = (self.end_x_ - start_x)
        return start_x + 0.5*diff_x

    @property
    def y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the midpoint
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        if self._y is None:
            return None
        start_y = self.start_y_
        diff_y = (self.end_y_ - start_y)
        return start_y + 0.5*diff_y

    @property
    def z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the midpoint
        of each compartment. Returns ``None`` for morphologies without
        coordinates.
        '''
        if self._z is None:
            return None
        start_z = self.start_z_
        diff_z = (self.end_z_ - start_z)
        return start_z + 0.5*diff_z

    @property
    def end_x_(self):
        '''
        The x coordinate (as a unitless floating point number) at the end of
        each compartment. Returns ``None`` for morphologies without coordinates.
        '''
        if self._x is None:
            return None
        parent_end_x = self.parent.end_x_ if self.parent is not None else None
        if parent_end_x is not None:
            return parent_end_x[-1] + self._x[1:]
        else:
            return self._x[1:]

    @property
    def end_y_(self):
        '''
        The y coordinate (as a unitless floating point number) at the end of
        each compartment. Returns ``None`` for morphologies without coordinates.
        '''
        if self._y is None:
            return None
        parent_end_y = self.parent.end_y_ if self.parent is not None else None
        if parent_end_y is not None:
            return parent_end_y[-1] + self._y[1:]
        else:
            return self._y[1:]

    @property
    def end_z_(self):
        '''
        The z coordinate (as a unitless floating point number) at the end of
        each compartment. Returns ``None`` for morphologies without coordinates.
        '''
        if self._z is None:
            return None
        parent_end_z = self.parent.end_z_ if self.parent is not None else None
        if parent_end_z is not None:
            return parent_end_z[-1] + self._z[1:]
        else:
            return self._z[1:]
class Cylinder(Section):
    '''
    A cylindrical section. For sections with more complex geometry (varying
    length and/or diameter of each compartment), use the `Section` class.

    Parameters
    ----------
    diameter : `Quantity`
        The diameter of the cylinder.
    n : int, optional
        The number of compartments in this section. Defaults to 1.
    length : `Quantity`, optional
        The length of the cylinder. Cannot be combined with the specification
        of coordinates.
    x : `Quantity`, optional
        A sequence of two values, the start and the end point of the cylinder.
        The coordinates are interpreted as relative to the end point of the
        parent compartment (if any), so in most cases the start point should
        be ``0*um``. The common exception is a cylinder connecting to a `Soma`,
        here the start point can be used to make the cylinder start at the
        surface of the sphere instead of at its center.
        You can specify all of ``x``, ``y``, or ``z`` to specify
        a morphology in 3D, or only one or two out of them to specify a
        morphology in 1D or 2D.
    y : `Quantity`, optional
        See ``x``
    z : `Quantity`, optional
        See ``x``
    type : str, optional
        The type (e.g. ``"axon"``) of this `Cylinder`.
    '''
    @check_units(n=1, length=meter, diameter=meter, x=meter, y=meter, z=meter)
    def __init__(self, diameter, n=1, length=None, x=None, y=None, z=None,
                 type=None):
        n = int(n)
        Morphology.__init__(self, n=n, type=type)
        # Diameter: a single value, repeated for each compartment
        if diameter.shape != () and (diameter.ndim > 1 or len(diameter) != 1):
            raise TypeError('The diameter argument has to be a single value.')
        diameter = np.ones(n) * diameter
        self._diameter = diameter
        # length and coordinates are mutually exclusive ways of specifying
        # the geometry
        if ((x is not None or y is not None or z is not None) and
                length is not None):
            raise TypeError('Cannot specify coordinates and length at the same '
                            'time.')
        if length is not None:
            # Length: a single total length, distributed evenly over the n
            # compartments
            if length.shape != () and (length.ndim > 1 or len(length) != 1):
                raise TypeError('The length argument has to be a single value.')
            self._length = np.ones(n) * (length/n)  # length was total length
            self._x = self._y = self._z = None
        else:
            # Coordinates
            if x is None and y is None and z is None:
                raise TypeError('No length specified, need to specify at least '
                                'one out of x, y, or z.')
            for name, value in [('x', x), ('y', y), ('z', z)]:
                if value is not None and (value.ndim != 1 or len(value) != 2):
                    raise TypeError('%s needs to be a 1-dimensional array of '
                                    'length 2 (start and end point)' % name)
            # Coordinates are stored as unitless arrays (values in meters,
            # guaranteed by the check_units decorator), linearly interpolated
            # over the n+1 compartment boundaries
            self._x = np.asarray(np.linspace(x[0], x[1], n+1)) if x is not None else np.zeros(n+1)
            self._y = np.asarray(np.linspace(y[0], y[1], n+1)) if y is not None else np.zeros(n+1)
            self._z = np.asarray(np.linspace(z[0], z[1], n+1)) if z is not None else np.zeros(n+1)
            # Per-compartment lengths follow from the Euclidean distance
            # between consecutive points
            length = np.sqrt((self.end_x - self.start_x) ** 2 +
                             (self.end_y - self.start_y) ** 2 +
                             (self.end_z - self.start_z) ** 2)
            self._length = length

    def __repr__(self):
        s = '{klass}(diameter={diam!r}'.format(klass=self.__class__.__name__,
                                               diam=self.diameter[0])
        if self.n != 1:
            s += ', n={n}'.format(n=self.n)
        if self._x is not None:
            # Only the start and end points were given to the constructor
            s += ', x={x!r}, y={y!r}, z={z!r}'.format(x=self._x[[0, -1]],
                                                      y=self._y[[0, -1]],
                                                      z=self._z[[0, -1]])
        else:
            s += ', length={length!r}'.format(length=sum(self._length))
        if self.type is not None:
            s += ', type={type!r}'.format(type=self.type)
        return s + ')'

    def copy_section(self):
        '''
        Create a copy of this section (without parent or children).
        '''
        if self.x is None:
            # The constructor expects the *total* length of the cylinder,
            # while self.length stores the n per-compartment lengths --
            # summing fixes a TypeError that occurred for n > 1
            return Cylinder(self.diameter[0], n=self.n,
                            length=np.sum(self.length), type=self.type)
        else:
            # The internally stored coordinates are unitless values in
            # meters; reattach units for the constructor's unit check
            # (consistent with Section.copy_section)
            return Cylinder(self.diameter[0], n=self.n,
                            x=self._x[[0, -1]]*meter, y=self._y[[0, -1]]*meter,
                            z=self._z[[0, -1]]*meter, type=self.type)

    # Overwrite the properties that differ from `Section`

    @property
    def area(self):
        r'''
        The membrane surface area of each compartment in this section. The
        surface area of each compartment is calculated as
        :math:`\pi d l`,
        where :math:`l` is the length of the compartment, and :math:`d` is its
        diameter. Note that this surface area does not contain the area of
        the two disks at the two sides of the cylinder.
        '''
        return np.pi * self._diameter * self.length

    @property
    def start_diameter(self):
        '''
        The diameter at the start of each compartment in this section.
        '''
        return self._diameter

    @property
    def diameter(self):
        '''
        The diameter at the middle of each compartment in this section.
        '''
        return self._diameter

    @property
    def end_diameter(self):
        '''
        The diameter at the end of each compartment in this section.
        '''
        return self._diameter

    @property
    def volume(self):
        r'''
        The volume of each compartment in this section. The volume of each
        compartment is calculated as
        :math:`\pi \frac{d}{2}^2 l` ,
        where :math:`l` is the length of the compartment, and :math:`d` is its
        diameter.
        '''
        return np.pi * (self._diameter/2)**2 * self.length

    @property
    def r_length_1(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        start and the midpoint of each compartment. Dividing this value by the
        Intracellular resistivity gives the conductance.
        '''
        return np.pi/2 * (self._diameter**2)/self.length

    @property
    def r_length_2(self):
        '''
        The geometry-dependent term to calculate the conductance between the
        midpoint and the end of each compartment. Dividing this value by the
        Intracellular resistivity gives the conductance.
        '''
        return np.pi/2 * (self._diameter**2)/self.length
| 38.323278 | 105 | 0.569423 |
5846619656455af3a1c66349f2a5d9d0ac0ea4e1 | 377 | py | Python | modules/time/timedelta.py | PraghadeshManivannan/Python | 7a42269c5f8cfd9178f1ed39ffc2afea1dd6c5aa | [
"MIT"
] | null | null | null | modules/time/timedelta.py | PraghadeshManivannan/Python | 7a42269c5f8cfd9178f1ed39ffc2afea1dd6c5aa | [
"MIT"
] | null | null | null | modules/time/timedelta.py | PraghadeshManivannan/Python | 7a42269c5f8cfd9178f1ed39ffc2afea1dd6c5aa | [
"MIT"
] | null | null | null | from datetime import time,date,timedelta
time_1 = time()
print('Time_1: {}'.format(time_1))
time_2 = time(hour=10,minute=45,second=47,microsecond=123458)
print('Time_2: {}'.format(time_2))
today = date.today()
outbreak = date(2002,11,10)
daysSinceOutbreak = today - outbreak
print(daysSinceOutbreak)
tomorrow = today + timedelta(seconds=86400)
print(tomorrow) | 25.133333 | 62 | 0.724138 |
b4c657c572bc2f244c70fb665fc34dd450042729 | 185 | py | Python | PythonMundoDois/ex061.py | HendrylNogueira/CursoPython3 | c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f | [
"MIT"
] | null | null | null | PythonMundoDois/ex061.py | HendrylNogueira/CursoPython3 | c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f | [
"MIT"
] | null | null | null | PythonMundoDois/ex061.py | HendrylNogueira/CursoPython3 | c3d9d4e2a27312b83d744aaf0f8d01b26e6faf4f | [
"MIT"
] | null | null | null | termo = int(input('Digite o termo: '))
razao = int(input('Digite a razão: '))
cont = 0
while cont <= 10:
print(f'{termo} -> ', end='')
termo += razao
cont +=1
print('Fim.')
| 20.555556 | 38 | 0.556757 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.