blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cd9080a31750fd6fb0f6169f6b3a521c68c2b600 | 501615c82801733e69c7447ab9fd68d3883ed947 | /hotfix/.svn/pristine/cd/cd9080a31750fd6fb0f6169f6b3a521c68c2b600.svn-base | 3edb65fecab8efada344d05e730544d8fd6e0929 | [] | no_license | az0ne/python | b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee | aec5d23bb412f7dfca374fb5c5b9988c1b817347 | refs/heads/master | 2021-07-18T02:08:46.314972 | 2017-10-27T06:23:36 | 2017-10-27T06:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | #!/usr/bin/env python
# -*- coding: utf8 -*-
from django.db import models
class RecommendKeywords(models.Model):
name = models.CharField(u'推荐搜索关键词', max_length=50)
class Meta:
verbose_name = u'推荐搜索关键词'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
| [
"1461847795@qq.com"
] | 1461847795@qq.com | |
fed1270ad3e023a09ad4a6c34adf894036258543 | 9848a719ddfdd21b5fe1fa2f55da290c0f6952dc | /lib/RollHash.py | 256aaff82909156e295b6cb35458d089751d67f4 | [] | no_license | maomao905/algo | 725f7fe27bb13e08049693765e4814b98fb0065a | 84b35ec9a4e4319b29eb5f0f226543c9f3f47630 | refs/heads/master | 2023-03-13T12:38:55.401373 | 2021-03-25T01:55:48 | 2021-03-25T01:55:48 | 351,278,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | A = 'abcd'
B = 'dacbabcdaacd'
W, MOD = 26, 2**32
L = len(A)
h = 0
for i in range(len(A)):
h = (h * W + ord(A[i])) % MOD
b_h = 0
WL = pow(W,L,MOD)
for i in range(len(B)):
if i < len(A):
b_h = (b_h * W + ord(B[i])) % MOD
else:
b_h = (b_h * W - ord(B[i-L]) * WL + ord(B[i])) % MOD
if b_h == h:
return True
| [
"maoya.sato@gmail.com"
] | maoya.sato@gmail.com |
c22c92dfe26cffb6193e8818d239bc2767418d4f | e755453c853ae400d94f562ad215b59166b63782 | /tests/splay_tests/test_prev.py | 68c2817fe7eda9dc6a0bce3cc6dcdd77fe0ba5d6 | [
"MIT"
] | permissive | lycantropos/dendroid | 0cb3e276dd9c476b82b0b7a17c25c2e05616a993 | fd11c74a395eb791caf803c848805569869080f6 | refs/heads/master | 2023-04-07T11:07:55.550796 | 2023-03-27T00:46:03 | 2023-03-27T00:46:03 | 215,369,321 | 0 | 1 | MIT | 2020-09-24T05:02:02 | 2019-10-15T18:29:36 | Python | UTF-8 | Python | false | false | 547 | py | from typing import Tuple
from hypothesis import given
from dendroid.hints import Value
from tests.utils import (BaseSet,
are_keys_equal,
set_value_to_key)
from . import strategies
@given(strategies.non_empty_sets_with_their_non_min_values)
def test_properties(set_with_value: Tuple[BaseSet, Value]) -> None:
set_, value = set_with_value
result = set_.prev(value)
assert are_keys_equal(set_value_to_key(set_, result), set_.tree.root.key)
assert result is set_.tree.root.value
| [
"azatibrakov@gmail.com"
] | azatibrakov@gmail.com |
1d51d837e727c28b3f81bbbc0c956636983839e0 | b28300f3af1517e149baeadd9f87d92e56e23ba0 | /pages/forms.py | 65907867023b70c0f348bf0a17bbf9029e3fe93f | [] | no_license | xarala221/myportfolio | f39ea13fe493d4d3a7525774d568daa099a51cd0 | a62be57414b0971157a9923c17ec8bf5c9524823 | refs/heads/master | 2022-12-14T23:00:42.540391 | 2018-06-29T01:27:39 | 2018-06-29T01:27:39 | 138,381,453 | 2 | 0 | null | 2022-12-08T02:14:30 | 2018-06-23T07:39:53 | JavaScript | UTF-8 | Python | false | false | 781 | py | from django import forms
from .models import Contact
class ContactForm(forms.ModelForm):
email = forms.EmailField(label='Email :', widget=forms.EmailInput(attrs={'placeholder': 'Your Email', 'class': 'form-control ', 'required': 'True'}))
name = forms.CharField(label='Name :', widget=forms.TextInput(attrs={'placeholder': 'Your Name', 'class': 'form-control ', 'required': 'True'}))
message = forms.CharField(label='Message :', widget=forms.Textarea(attrs={'placeholder': 'What you want to say? Are You have a project? or you need help?', 'class': 'form-control ', 'required': 'True'}))
class Meta:
model = Contact
fields = ['name', 'email', 'message']
def clean_email(self, *args, **kwargs):
email = self.cleaned_data.get("email")
return email | [
"xaralaxarala@gmail.com"
] | xaralaxarala@gmail.com |
c340916b2367f30fe565bb515bbbbf87eb4445e3 | acad4e69e68354311e6b82f70d713e5c47d30cf8 | /User_version/Chapter1_Pre-requisit/Section1_math/01_solution.py | c1513df4d29e7b25e65b9fd7de36fa04bd169bf1 | [
"MIT"
] | permissive | MacHu-GWU/Data-Science-in-Python | fabe7232349cec78a4cded4d930a66fc5e362a2c | a3042864fae6c764d031817c7d7f5ef4ee7251cb | refs/heads/master | 2020-04-06T07:02:54.415759 | 2018-08-04T16:56:27 | 2018-08-04T16:56:27 | 22,156,078 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | ##coding=utf8
##author=Sanhe
##date=07-13-2014
'''
Let's take a look at how long your codes takes.
Please copy your codes to the function "combination(n,k)", and run.
Usually the solution is 4-10 times faster than naive implementation.
WHY?
Time complexity
c(n,k) = n! / (k! * (n-k)! )
n + k + n-k = 2n times multiply
= [ n * (n-1) * ... * (n-k+1) ] / k!
k + k = 2k times multiply
Memory complexity
for i in xrange(n) is better than for i in range(n)
Because, range(5) is to generate a "n" length list in
memory and then cumulatively do the multiply
But, xrange(n) is to generate only one number each time
and then do the mutiply. So when n is large, you save
a lot of memory.
'''
import time
import math
def combination(n,k,answer = -999):
''' Copy your codes down here
'''
return answer
def combination1(n, k):
""" A fast way to calculate combination.
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in xrange(1, min(k, n - k) + 1): ## high performance generator
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
def test():
n, k = 500, 188 ## <=== test data
st = time.clock()
answer = combination(n,k)
print '\nyour codes time elapse: %s' % (time.clock() - st)
print 'c(%s,%s) = %s' % (n, k, answer)
st = time.clock()
answer1 = combination1(n,k)
print '\nsolution time elapse: %s' % (time.clock() - st)
print 'c(%s,%s) = %s' % (n, k, answer1)
if answer != answer1:
print ('\nMaybe you forget to paste your own codes to "def combination(n,k,answer=0):"'
'\n or your code has something wrong.')
if __name__ == '__main__':
test() | [
"husanhe@gmail.com"
] | husanhe@gmail.com |
d7c70f9ad07e408f22db366edf4d3645a22ed5d4 | 3f7d819c918fe185bfb2c96bdd85b394e3b3b42c | /12-4-15.py | fffc1d210b97cd33177e4c8571206576d5467b74 | [] | no_license | daxaxelrod/notes-from-3rd-semester | 1b04a1c8bba458c9af226d190ebdfa6bb31cef62 | 171f272bd57c7989f999dc799f1fa1b3ca88f5d9 | refs/heads/master | 2021-01-10T12:49:05.632358 | 2015-12-14T22:19:40 | 2015-12-14T22:19:40 | 47,573,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | 12-4-15 class notes
quick thing on zeroth law of thermodynamics
validates idea of tempertature
2 objects in thermal equalirium with a 3rd object are equal to eachother
Chapter 19
Kinetic Theory of gasses
def review():
look over the first two setions of chater 19
pressure volume equals N R tempertature
if you hold pressure constant
volme is proportional to tempertature
pressure Volume is proportional to the tempertature of the system
PV = N*R*T
n = number of moles in the system
r = gas constant has a value of 8.31 joules per mole kelvin (j/ mol*K)
Pv = N * k * T
n is the same
k is the boltzmann constant == 1.38 times 10 ^ -23 j/k
make sure you are always working with kelvin
add 273 to the celius value
Ideal Gasses and work
Isothermal process (constant temperature) work:
pv =nrt where pressure = nrt/V
work is the integral from Vi to Vf of pressure d volume
plug in pressure from above
bam
we are left integrating 1/V
work then equals
n * R * T (ln V) from vi to vf
Word done for isothermic = nrt ln(Vf/Vi)
remember in constant volume
work is 0
isobaric work equal pressure(vf-vi)
Constant temperature process
we get a curve were pressume is proportional to 1 / V
Specific heat
recall
Q = cm detla temperature c is specific heat, m is mass
can also write in moles but pay attention to units bro
for constant volume case
Q = c n delta t c equals 12.5 moles kelvin
delta E = Q minus work
for constant volume case
the way to account for internal energy change isisssssss is
delta e = Molar specific heat *moles * delta temperature
path independant
this can be used for change in internal energy
all processes with same delta t have the same #!/usr/bin/env python
for constant pressuure case:
Q = molar specific heat at constant pressure * n * delta t
where molar blah blah is 20.8 j / mol k
at constant pressure, work = p delta v
can also write n * R * delta t
any change has to come from the temperature
Recall first law
delta e is molar thing *n * delta t
constant specific heat for volume * n delta t
===
constant specific heat for pressure
Cp - Cv = R
adiabatic means that there is no heat exchange
bc internal enrgy is path independant (thing potential with grav)
use delta E = cv *n * delta t
| [
"daxaxelrod@gmail.com"
] | daxaxelrod@gmail.com |
7f34e8fac9c89052bcd5c9d0ab669942016c0277 | a3ed36806067cecb5c7aaa5dcfe4a0e4163a0742 | /tests/unit/test_base.py | ca8ad6a430909b83a6a161825458f6c183d37108 | [
"MIT"
] | permissive | gabrielfalcao/plant | 02f14055439947fa4c56f58c8b9f539b8eb7f559 | 6d9122470fd6ad3b89957f0dcbc6bdeedb46ca9b | refs/heads/master | 2021-08-28T16:00:45.533014 | 2017-09-08T17:55:39 | 2017-09-08T17:55:39 | 11,139,306 | 6 | 4 | NOASSERTION | 2019-12-20T17:39:56 | 2013-07-03T01:37:50 | Python | UTF-8 | Python | false | false | 1,435 | py | # -*- coding: utf-8 -*-
# <plant - filesystem for humans>
# Copyright (C) <2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from mock import patch
from plant.core import isfile, isdir
@patch('plant.core.isdir_base')
def test_isdir_when_exists(isdir_base):
("plant.core.isdir should return os.path.isdir when given path exists")
isdir_base.return_value = "yeah!"
isdir("/foo", True).should.equal("yeah!")
isdir_base.assert_called_once_with("/foo")
@patch('plant.core.isfile_base')
def test_isfile_when_exists(isfile_base):
("plant.core.isfile should return os.path.isfile when given path exists")
isfile_base.return_value = "yeah!"
isfile("/foo", True).should.equal("yeah!")
isfile_base.assert_called_once_with("/foo")
| [
"gabriel@nacaolivre.org"
] | gabriel@nacaolivre.org |
bf8ca3cf4733a29992a93f9a401b418c5c3aeb68 | b6699361cea596afbafcff40056e12a3ccadb590 | /complexconfig_python/complexconfig-0.2/complexconfig/parser/dict_parser.py | 5605448e1ff53b86678150ab5608dcb74eec378e | [
"Apache-2.0"
] | permissive | benhe119/python_lib | 4c6ba3468ef380eadc5ab65401052aba224801db | e2d4052de04c82cb7bccd08042f28db824cab442 | refs/heads/master | 2020-08-09T10:09:59.368521 | 2019-03-29T02:21:55 | 2019-03-29T02:21:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # -*- coding: utf8 -*-
from __future__ import print_function, unicode_literals
from . import BaseParser
class DictParser(BaseParser):
"""
Parse config from dictionary.
This is a specific parser which parse code from python dictionary instead of text
"""
def parse(self, data):
return data
def dump(self, config):
return config
| [
"zengjinping@threathunter.cn"
] | zengjinping@threathunter.cn |
1002ed5925c93eea4d3464b1096616a44537c652 | 9fbaed9e3ff1bf237b2a75ecaa717951051f4115 | /virtual/bin/wheel | fd1b9a8dd61e0f3d199ef018086988e48d0c7c1c | [] | no_license | DK-denno/dubiez-jumia | f269f9eacba1bc67d234047f36920aba21afa340 | 0e5b4562959c10429fbfaed862c064eb02e692a1 | refs/heads/master | 2023-05-21T23:25:15.782401 | 2021-06-08T21:41:15 | 2021-06-08T21:41:15 | 375,151,695 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/root/Documents/jumia/dkJumia/virtual/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"dennisveer27@gmail.com"
] | dennisveer27@gmail.com | |
2b930935afffa61c5d4c9f30f27866be050a73e7 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/generated_clients/apis/servicedirectory/v1beta1/servicedirectory_v1beta1_messages.py | 6be6c2e2c043d9279ed1ec3977358239b639fea2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 97,877 | py | """Generated message classes for servicedirectory version v1beta1.
Service Directory is a platform for discovering, publishing, and connecting
services.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'servicedirectory'
class Asset(_messages.Message):
r"""Asset associated with Service Directory resource.
Fields:
resource: Required. Schemaless uri for the component Examples :
//compute.googleapis.com/projects/1234/zones/us-
east1-c/instanceGroups/mig1
//compute.googleapis.com/projects/1234/regions/us-
east1/forwardingRules/fr1
"""
resource = _messages.StringField(1)
class Attributes(_messages.Message):
r"""Attributes are structured data attached to a given resource.
Fields:
managedRegistration: Output only. Indicates whether a GCP product or
service manages this resource. When a resource is fully managed by
another GCP product or system the information in Service Directory is
read-only. The source of truth is the relevant GCP product or system
which is managing the resource. The Service Directory resource will be
updated or deleted as appropriate to reflect the state of the underlying
`origin_resource`. Note: The `origin_resource` can be found in the
endpoint(s) associated with this service.
pscConnectionId: Optional. The Private Service Connect connection id of
the Private Service Connect Forwarding Rule. This field should be unset
if the service is not a Private Service Connect service.
pscTarget: Optional. The target resource for the Private Service Connect
service. This field should be unset if the service is not a Private
Service Connect service. For a Private Service Connect service accessing
managed services, this is the URI of the service producer's service
attachment. For a Private Service Connect service accessing Google APIs,
this will be the name of the Google API bundle. See the [Private Service
Connect documentation](https://cloud.google.com/vpc/docs/private-
service-connect) for more information.
"""
managedRegistration = _messages.BooleanField(1)
pscConnectionId = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
pscTarget = _messages.StringField(3)
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ { "service": "allServices",
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type":
"ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com",
"audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type":
"DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For
sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
logging. It also exempts `jose@example.com` from DATA_READ logging, and
`aliya@example.com` from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
service = _messages.StringField(2)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example: {
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables
'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from
DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
ignoreChildExemptions: A boolean attribute.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
ignoreChildExemptions = _messages.BooleanField(2)
logType = _messages.EnumField('LogTypeValueValuesEnum', 3)
class AuthorizationLoggingOptions(_messages.Message):
r"""Authorization-related information used by Cloud Audit Logging.
Enums:
PermissionTypeValueValuesEnum: The type of the permission that was
checked.
Fields:
permissionType: The type of the permission that was checked.
"""
class PermissionTypeValueValuesEnum(_messages.Enum):
r"""The type of the permission that was checked.
Values:
PERMISSION_TYPE_UNSPECIFIED: Default. Should not be used.
ADMIN_READ: A read of admin (meta) data.
ADMIN_WRITE: A write of admin (meta) data.
DATA_READ: A read of standard data.
DATA_WRITE: A write of standard data.
"""
PERMISSION_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
ADMIN_WRITE = 2
DATA_READ = 3
DATA_WRITE = 4
permissionType = _messages.EnumField('PermissionTypeValueValuesEnum', 1)
class Binding(_messages.Message):
r"""Associates `members`, or principals, with a `role`.
Fields:
bindingId: A string attribute.
condition: The condition that is associated with this binding. If the
condition evaluates to `true`, then this binding applies to the current
request. If the condition evaluates to `false`, then this binding does
not apply to the current request. However, a different role binding
might grant the same role to one or more of the principals in this
binding. To learn which resources support conditions in their IAM
policies, see the [IAM
documentation](https://cloud.google.com/iam/help/conditions/resource-
policies).
members: Specifies the principals requesting access for a Google Cloud
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet; with
or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. Does not include identities that come from
external identity providers (IdPs) through identity federation. *
`user:{emailid}`: An email address that represents a specific Google
account. For example, `alice@example.com` . *
`serviceAccount:{emailid}`: An email address that represents a Google
service account. For example, `my-other-
app@appspot.gserviceaccount.com`. *
`serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`:
An identifier for a [Kubernetes service
account](https://cloud.google.com/kubernetes-engine/docs/how-
to/kubernetes-service-accounts). For example, `my-
project.svc.id.goog[my-namespace/my-kubernetes-sa]`. *
`group:{emailid}`: An email address that represents a Google group. For
example, `admins@example.com`. * `domain:{domain}`: The G Suite domain
(primary) that represents all the users of that domain. For example,
`google.com` or `example.com`. *
`deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
identifier) representing a user that has been recently deleted. For
example, `alice@example.com?uid=123456789012345678901`. If the user is
recovered, this value reverts to `user:{emailid}` and the recovered user
retains the role in the binding. *
`deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
(plus unique identifier) representing a service account that has been
recently deleted. For example, `my-other-
app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the
service account is undeleted, this value reverts to
`serviceAccount:{emailid}` and the undeleted service account retains the
role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
email address (plus unique identifier) representing a Google group that
has been recently deleted. For example,
`admins@example.com?uid=123456789012345678901`. If the group is
recovered, this value reverts to `group:{emailid}` and the recovered
group retains the role in the binding.
role: Role that is assigned to the list of `members`, or principals. For
example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"""
bindingId = _messages.StringField(1)
condition = _messages.MessageField('Expr', 2)
members = _messages.StringField(3, repeated=True)
role = _messages.StringField(4)
class CloudAuditOptions(_messages.Message):
r"""Write a Cloud Audit log
Enums:
LogNameValueValuesEnum: The log_name to populate in the Cloud Audit
Record.
Fields:
authorizationLoggingOptions: Information used by the Cloud Audit Logging
pipeline.
logName: The log_name to populate in the Cloud Audit Record.
"""
class LogNameValueValuesEnum(_messages.Enum):
r"""The log_name to populate in the Cloud Audit Record.
Values:
UNSPECIFIED_LOG_NAME: Default. Should not be used.
ADMIN_ACTIVITY: Corresponds to "cloudaudit.googleapis.com/activity"
DATA_ACCESS: Corresponds to "cloudaudit.googleapis.com/data_access"
"""
UNSPECIFIED_LOG_NAME = 0
ADMIN_ACTIVITY = 1
DATA_ACCESS = 2
authorizationLoggingOptions = _messages.MessageField('AuthorizationLoggingOptions', 1)
logName = _messages.EnumField('LogNameValueValuesEnum', 2)
class Condition(_messages.Message):
r"""A condition to be met.
Enums:
IamValueValuesEnum: Trusted attributes supplied by the IAM system.
OpValueValuesEnum: An operator to apply the subject with.
SysValueValuesEnum: Trusted attributes supplied by any service that owns
resources and uses the IAM system for access control.
Fields:
iam: Trusted attributes supplied by the IAM system.
op: An operator to apply the subject with.
svc: Trusted attributes discharged by the service.
sys: Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
values: The objects of the condition.
"""
class IamValueValuesEnum(_messages.Enum):
r"""Trusted attributes supplied by the IAM system.
Values:
NO_ATTR: Default non-attribute.
AUTHORITY: Either principal or (if present) authority selector.
ATTRIBUTION: The principal (even if an authority selector is present),
which must only be used for attribution, not authorization.
SECURITY_REALM: Any of the security realms in the IAMContext
(go/security-realms). When used with IN, the condition indicates "any
of the request's realms match one of the given values; with NOT_IN,
"none of the realms match any of the given values". Note that a value
can be: - 'self:campus' (i.e., clients that are in the same campus) -
'self:metro' (i.e., clients that are in the same metro) - 'self:cloud-
region' (i.e., allow connections from clients that are in the same
cloud region) - 'self:prod-region' (i.e., allow connections from
clients that are in the same prod region) - 'guardians' (i.e., allow
connections from its guardian realms. See go/security-realms-
glossary#guardian for more information.) - 'self' [DEPRECATED] (i.e.,
allow connections from clients that are in the same security realm,
which is currently but not guaranteed to be campus-sized) - a realm
(e.g., 'campus-abc') - a realm group (e.g., 'realms-for-borg-cell-xx',
see: go/realm-groups) A match is determined by a realm group
membership check performed by a RealmAclRep object (go/realm-acl-
howto). It is not permitted to grant access based on the *absence* of
a realm, so realm conditions can only be used in a "positive" context
(e.g., ALLOW/IN or DENY/NOT_IN).
APPROVER: An approver (distinct from the requester) that has authorized
this request. When used with IN, the condition indicates that one of
the approvers associated with the request matches the specified
principal, or is a member of the specified group. Approvers can only
grant additional access, and are thus only used in a strictly positive
context (e.g. ALLOW/IN or DENY/NOT_IN).
JUSTIFICATION_TYPE: What types of justifications have been supplied with
this request. String values should match enum names from
security.credentials.JustificationType, e.g. "MANUAL_STRING". It is
not permitted to grant access based on the *absence* of a
justification, so justification conditions can only be used in a
"positive" context (e.g., ALLOW/IN or DENY/NOT_IN). Multiple
justifications, e.g., a Buganizer ID and a manually-entered reason,
are normal and supported.
CREDENTIALS_TYPE: What type of credentials have been supplied with this
request. String values should match enum names from
security_loas_l2.CredentialsType - currently, only
CREDS_TYPE_EMERGENCY is supported. It is not permitted to grant access
based on the *absence* of a credentials type, so the conditions can
only be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
CREDS_ASSERTION: EXPERIMENTAL -- DO NOT USE. The conditions can only be
used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
"""
NO_ATTR = 0
AUTHORITY = 1
ATTRIBUTION = 2
SECURITY_REALM = 3
APPROVER = 4
JUSTIFICATION_TYPE = 5
CREDENTIALS_TYPE = 6
CREDS_ASSERTION = 7
class OpValueValuesEnum(_messages.Enum):
r"""An operator to apply the subject with.
Values:
NO_OP: Default no-op.
EQUALS: DEPRECATED. Use IN instead.
NOT_EQUALS: DEPRECATED. Use NOT_IN instead.
IN: The condition is true if the subject (or any element of it if it is
a set) matches any of the supplied values.
NOT_IN: The condition is true if the subject (or every element of it if
it is a set) matches none of the supplied values.
DISCHARGED: Subject is discharged
"""
NO_OP = 0
EQUALS = 1
NOT_EQUALS = 2
IN = 3
NOT_IN = 4
DISCHARGED = 5
class SysValueValuesEnum(_messages.Enum):
r"""Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
Values:
NO_ATTR: Default non-attribute type
REGION: Region of the resource
SERVICE: Service name
NAME: Resource name
IP: IP address of the caller
"""
NO_ATTR = 0
REGION = 1
SERVICE = 2
NAME = 3
IP = 4
iam = _messages.EnumField('IamValueValuesEnum', 1)
op = _messages.EnumField('OpValueValuesEnum', 2)
svc = _messages.StringField(3)
sys = _messages.EnumField('SysValueValuesEnum', 4)
values = _messages.StringField(5, repeated=True)
class ContactInfo(_messages.Message):
r"""Contact information for the functionality. Used to refer to owners of
service or workload resource for different personas.
Fields:
channel: Optional. Channel to use to reach the contact, eg. pager, email
displayName: Optional. Display name of the contact.
email: Optional. Email of the contact.
"""
channel = _messages.StringField(1)
displayName = _messages.StringField(2)
email = _messages.StringField(3)
class CounterOptions(_messages.Message):
r"""Increment a streamz counter with the specified metric and field names.
Metric names should start with a '/', generally be lowercase-only, and end
in "_count". Field names should not contain an initial slash. The actual
exported metric names will have "/iam/policy" prepended. Field names
correspond to IAM request parameters and field values are their respective
values. Supported field names: - "authority", which is "[token]" if
IAMContext.token is present, otherwise the value of
IAMContext.authority_selector if present, and otherwise a representation of
IAMContext.principal; or - "iam_principal", a representation of
IAMContext.principal even if a token or authority selector is present; or -
"" (empty string), resulting in a counter with no fields. Examples: counter
{ metric: "/debug_access_count" field: "iam_principal" } ==> increment
counter /iam/policy/debug_access_count {iam_principal=[value of
IAMContext.principal]}
Fields:
customFields: Custom fields.
field: The field value to attribute.
metric: The metric to update.
"""
customFields = _messages.MessageField('CustomField', 1, repeated=True)
field = _messages.StringField(2)
metric = _messages.StringField(3)
class CustomField(_messages.Message):
r"""Custom fields. These can be used to create a counter with arbitrary
field/value pairs. See: go/rpcsp-custom-fields.
Fields:
name: Name is the field name.
value: Value is the field value. It is important that in contrast to the
CounterOptions.field, the value here is a constant that is not derived
from the IAMContext.
"""
name = _messages.StringField(1)
value = _messages.StringField(2)
class DataAccessOptions(_messages.Message):
  r"""Write a Data Access (Gin) log.

  Enums:
    LogModeValueValuesEnum: Whether the operation may proceed if the Gin log
      write fails (see the enum's value docs).

  Fields:
    logMode: A LogModeValueValuesEnum attribute.
  """
  class LogModeValueValuesEnum(_messages.Enum):
    r"""LogModeValueValuesEnum enum type.

    Values:
      LOG_MODE_UNSPECIFIED: Client is not required to write a partial Gin log
        immediately after the authorization check. If client chooses to write
        one and it fails, client may either fail open (allow the operation to
        continue) or fail closed (handle as a DENY outcome).
      LOG_FAIL_CLOSED: The application's operation in the context of which
        this authorization check is being made may only be performed if it is
        successfully logged to Gin. For instance, the authorization library
        may satisfy this obligation by emitting a partial log entry at
        authorization check time and only returning ALLOW to the application
        if it succeeds. If a matching Rule has this directive, but the client
        has not indicated that it will honor such requirements, then the IAM
        check will result in authorization failure by setting
        CheckPolicyResponse.success=false.
    """
    LOG_MODE_UNSPECIFIED = 0
    LOG_FAIL_CLOSED = 1
  logMode = _messages.EnumField('LogModeValueValuesEnum', 1)
class Empty(_messages.Message):
  r"""A generic empty message that you can re-use to avoid defining duplicated
  empty messages in your APIs. A typical example is to use it as the request
  or the response type of an API method. For instance: service Foo { rpc
  Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
  """
class Endpoint(_messages.Message):
  r"""An individual endpoint that provides a service. The service must already
  exist to create an endpoint.

  Messages:
    MetadataValue: Optional. Metadata for the endpoint. This data can be
      consumed by service clients. Restrictions: * The entire metadata
      dictionary may contain up to 512 characters, spread across all key-value
      pairs. Metadata that goes beyond this limit are rejected * Valid
      metadata keys have two segments: an optional prefix and name, separated
      by a slash (/). The name segment is required and must be 63 characters
      or less, beginning and ending with an alphanumeric character
      ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and
      alphanumerics between. The prefix is optional. If specified, the prefix
      must be a DNS subdomain: a series of DNS labels separated by dots (.),
      not longer than 253 characters in total, followed by a slash (/).
      Metadata that fails to meet these requirements are rejected Note: This
      field is equivalent to the `annotations` field in the v1 API. They have
      the same syntax and read/write to the same location in Service
      Directory.

  Fields:
    address: Optional. An IPv4 or IPv6 address. Service Directory rejects bad
      addresses like: * `8.8.8` * `8.8.8.8:53` * `test:bad:address` * `[::1]`
      * `[::1]:8080` Limited to 45 characters.
    attributes: Optional. Attributes associated with this Endpoint.
    createTime: Output only. The timestamp when the endpoint was created.
    fqdn: Optional. A fully qualified domain name address. Service Directory
      rejects special characters outside of letters, numbers, dots, and
      hyphens. FQDN is formatted as [hostname].[domain].[tld] eg:
      mail.google.com
    metadata: Optional. Metadata for the endpoint. This data can be consumed
      by service clients. Restrictions: * The entire metadata dictionary may
      contain up to 512 characters, spread across all key-value pairs.
      Metadata that goes beyond this limit are rejected * Valid metadata keys
      have two segments: an optional prefix and name, separated by a slash
      (/). The name segment is required and must be 63 characters or less,
      beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with
      dashes (-), underscores (_), dots (.), and alphanumerics between. The
      prefix is optional. If specified, the prefix must be a DNS subdomain: a
      series of DNS labels separated by dots (.), not longer than 253
      characters in total, followed by a slash (/). Metadata that fails to
      meet these requirements are rejected Note: This field is equivalent to
      the `annotations` field in the v1 API. They have the same syntax and
      read/write to the same location in Service Directory.
    name: Immutable. The resource name for the endpoint in the format
      `projects/*/locations/*/namespaces/*/services/*/endpoints/*`.
    network: Immutable. The Google Compute Engine network (VPC) of the
      endpoint in the format `projects//locations/global/networks/*`. The
      project must be specified by project number (project id is rejected).
      Incorrectly formatted networks are rejected, but no other validation is
      performed on this field (ex. network or project existence, reachability,
      or permissions).
    port: Optional. Service Directory rejects values outside of `[0, 65535]`.
    uid: Output only. A globally unique identifier (in UUID4 format) for this
      endpoint.
    updateTime: Output only. The timestamp when the endpoint was last updated.
  """
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Optional. Metadata for the endpoint. This data can be consumed by
    service clients. Restrictions: * The entire metadata dictionary may
    contain up to 512 characters, spread across all key-value pairs. Metadata
    that goes beyond this limit are rejected * Valid metadata keys have two
    segments: an optional prefix and name, separated by a slash (/). The name
    segment is required and must be 63 characters or less, beginning and
    ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-),
    underscores (_), dots (.), and alphanumerics between. The prefix is
    optional. If specified, the prefix must be a DNS subdomain: a series of
    DNS labels separated by dots (.), not longer than 253 characters in total,
    followed by a slash (/). Metadata that fails to meet these requirements
    are rejected Note: This field is equivalent to the `annotations` field in
    the v1 API. They have the same syntax and read/write to the same location
    in Service Directory.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Additional properties of type MetadataValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  address = _messages.StringField(1)
  attributes = _messages.MessageField('EndpointAttributes', 2)
  createTime = _messages.StringField(3)
  fqdn = _messages.StringField(4)
  metadata = _messages.MessageField('MetadataValue', 5)
  name = _messages.StringField(6)
  network = _messages.StringField(7)
  port = _messages.IntegerField(8, variant=_messages.Variant.INT32)
  uid = _messages.StringField(9)
  updateTime = _messages.StringField(10)
class EndpointAttributes(_messages.Message):
  r"""Attributes associated with endpoints.

  Enums:
    KubernetesResourceTypeValueValuesEnum: Optional. Kubernetes resource-type
      associated with this endpoint

  Fields:
    gcpFleetMembership: Optional. Membership URI (scheme-less URI) for
      resources registered to Google Cloud Fleet. Currently populated only for
      kubernetes resources. Sample URI: `//gkehub.googleapis.com/projects/my-
      project/locations/global/memberships/my-membership`
    kubernetesResourceType: Optional. Kubernetes resource-type associated with
      this endpoint
    managedRegistration: Output only. Indicates whether a GCP product or
      service manages this resource. When a resource is fully managed by
      another GCP product or system the information in Service Directory is
      read-only. The source of truth is the relevant GCP product or system
      which is managing the resource. The Service Directory resource will be
      updated or deleted as appropriate to reflect the state of the underlying
      `origin_resource`.
    originResource: Optional. Reference to the underlying resource that this
      endpoint represents. This should be the full name of the resource that
      this endpoint was created from.
    region: Optional. Region of the underlying resource, or "global" for
      global resources.
    zone: Optional. GCP zone of the underlying resource. Meant to be populated
      only for zonal resources, left unset for others.
  """
  class KubernetesResourceTypeValueValuesEnum(_messages.Enum):
    r"""Optional. Kubernetes resource-type associated with this endpoint

    Values:
      KUBERNETES_RESOURCE_TYPE_UNSPECIFIED: Not a Kubernetes workload.
      KUBERNETES_RESOURCE_TYPE_CLUSTER_IP: Cluster IP service related resource
      KUBERNETES_RESOURCE_TYPE_NODE_PORT: Node port service related resource
      KUBERNETES_RESOURCE_TYPE_LOAD_BALANCER: Load balancer service related
        resource
      KUBERNETES_RESOURCE_TYPE_HEADLESS: Headless service related resource
    """
    KUBERNETES_RESOURCE_TYPE_UNSPECIFIED = 0
    KUBERNETES_RESOURCE_TYPE_CLUSTER_IP = 1
    KUBERNETES_RESOURCE_TYPE_NODE_PORT = 2
    KUBERNETES_RESOURCE_TYPE_LOAD_BALANCER = 3
    KUBERNETES_RESOURCE_TYPE_HEADLESS = 4
  gcpFleetMembership = _messages.StringField(1)
  kubernetesResourceType = _messages.EnumField('KubernetesResourceTypeValueValuesEnum', 2)
  managedRegistration = _messages.BooleanField(3)
  originResource = _messages.StringField(4)
  region = _messages.StringField(5)
  zone = _messages.StringField(6)
class Expr(_messages.Message):
  r"""Represents a textual expression in the Common Expression Language (CEL)
  syntax. CEL is a C-like expression language. The syntax and semantics of CEL
  are documented at https://github.com/google/cel-spec. Example (Comparison):
  title: "Summary size limit" description: "Determines if a summary is less
  than 100 chars" expression: "document.summary.size() < 100" Example
  (Equality): title: "Requestor is owner" description: "Determines if
  requestor is the document owner" expression: "document.owner ==
  request.auth.claims.email" Example (Logic): title: "Public documents"
  description: "Determine whether the document should be publicly visible"
  expression: "document.type != 'private' && document.type != 'internal'"
  Example (Data Manipulation): title: "Notification string" description:
  "Create a notification string with a timestamp." expression: "'New message
  received at ' + string(document.create_time)" The exact variables and
  functions that may be referenced within an expression are determined by the
  service that evaluates it. See the service documentation for additional
  information.

  Fields:
    description: Optional. Description of the expression. This is a longer
      text which describes the expression, e.g. when hovered over it in a UI.
    expression: Textual representation of an expression in Common Expression
      Language syntax.
    location: Optional. String indicating the location of the expression for
      error reporting, e.g. a file name and a position in the file.
    title: Optional. Title for the expression, i.e. a short string describing
      its purpose. This can be used e.g. in UIs which allow to enter the
      expression.
  """
  description = _messages.StringField(1)
  expression = _messages.StringField(2)
  location = _messages.StringField(3)
  title = _messages.StringField(4)
class GetIamPolicyRequest(_messages.Message):
  r"""Request message for `GetIamPolicy` method.

  Fields:
    options: OPTIONAL: A `GetPolicyOptions` object for specifying options to
      `GetIamPolicy`.
  """
  options = _messages.MessageField('GetPolicyOptions', 1)
class GetPolicyOptions(_messages.Message):
  r"""Encapsulates settings provided to GetIamPolicy.

  Fields:
    requestedPolicyVersion: Optional. The maximum policy version that will be
      used to format the policy. Valid values are 0, 1, and 3. Requests
      specifying an invalid value will be rejected. Requests for policies with
      any conditional role bindings must specify version 3. Policies with no
      conditional role bindings may specify any valid value or leave the field
      unset. The policy in the response might use the policy version that you
      specified, or it might use a lower policy version. For example, if you
      specify version 3, but the policy has no conditional role bindings, the
      response uses version 1. To learn which resources support conditions in
      their IAM policies, see the [IAM
      documentation](https://cloud.google.com/iam/help/conditions/resource-
      policies).
  """
  requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32)
class InternalAttributes(_messages.Message):
  r"""Attributes associated with workload.

  Enums:
    ManagerTypeValueValuesEnum: Output only. The GCP resource/product
      responsible for this workload.

  Fields:
    managedRegistration: Output only. Defines if Workload is managed.
    managerType: Output only. The GCP resource/product responsible for this
      workload.
  """
  class ManagerTypeValueValuesEnum(_messages.Enum):
    r"""Output only. The GCP resource/product responsible for this workload.

    Values:
      TYPE_UNSPECIFIED: Default. Should not be used.
      GKE_HUB: Resource managed by GKE Hub.
      BACKEND_SERVICE: Resource managed by Arcus, Backend Service
    """
    TYPE_UNSPECIFIED = 0
    GKE_HUB = 1
    BACKEND_SERVICE = 2
  managedRegistration = _messages.BooleanField(1)
  managerType = _messages.EnumField('ManagerTypeValueValuesEnum', 2)
class ListEndpointsResponse(_messages.Message):
  r"""The response message for RegistrationService.ListEndpoints.

  Fields:
    endpoints: The list of endpoints.
    nextPageToken: Token to retrieve the next page of results, or empty if
      there are no more results in the list.
  """
  endpoints = _messages.MessageField('Endpoint', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class ListLocationsResponse(_messages.Message):
  r"""The response message for Locations.ListLocations.

  Fields:
    locations: A list of locations that matches the specified filter in the
      request.
    nextPageToken: The standard List next-page token.
  """
  locations = _messages.MessageField('Location', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class ListNamespacesResponse(_messages.Message):
  r"""The response message for RegistrationService.ListNamespaces.

  Fields:
    namespaces: The list of namespaces.
    nextPageToken: Token to retrieve the next page of results, or empty if
      there are no more results in the list.
  """
  namespaces = _messages.MessageField('Namespace', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class ListServicesResponse(_messages.Message):
  r"""The response message for RegistrationService.ListServices.

  Fields:
    nextPageToken: Token to retrieve the next page of results, or empty if
      there are no more results in the list.
    services: The list of services.
  """
  nextPageToken = _messages.StringField(1)
  services = _messages.MessageField('Service', 2, repeated=True)
class ListWorkloadsResponse(_messages.Message):
  r"""The response message for RegistrationService.ListWorkloads.

  Fields:
    nextPageToken: Token to retrieve the next page of results, or empty if
      there are no more results in the list.
    workloads: The list of workloads.
  """
  nextPageToken = _messages.StringField(1)
  workloads = _messages.MessageField('Workload', 2, repeated=True)
class Location(_messages.Message):
  r"""A resource that represents a Google Cloud location.

  Messages:
    LabelsValue: Cross-service attributes for the location. For example
      {"cloud.googleapis.com/region": "us-east1"}
    MetadataValue: Service-specific metadata. For example the available
      capacity at the given location.

  Fields:
    displayName: The friendly name for this location, typically a nearby city
      name. For example, "Tokyo".
    labels: Cross-service attributes for the location. For example
      {"cloud.googleapis.com/region": "us-east1"}
    locationId: The canonical id for this location. For example: `"us-east1"`.
    metadata: Service-specific metadata. For example the available capacity at
      the given location.
    name: Resource name for the location, which may vary between
      implementations. For example: `"projects/example-project/locations/us-
      east1"`
  """
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Cross-service attributes for the location. For example
    {"cloud.googleapis.com/region": "us-east1"}

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata. For example the available capacity at the
    given location.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  displayName = _messages.StringField(1)
  labels = _messages.MessageField('LabelsValue', 2)
  locationId = _messages.StringField(3)
  metadata = _messages.MessageField('MetadataValue', 4)
  name = _messages.StringField(5)
class LogConfig(_messages.Message):
  r"""Specifies what kind of log the caller must write.

  Fields:
    cloudAudit: Cloud audit options.
    counter: Counter options.
    dataAccess: Data access options.
  """
  cloudAudit = _messages.MessageField('CloudAuditOptions', 1)
  counter = _messages.MessageField('CounterOptions', 2)
  dataAccess = _messages.MessageField('DataAccessOptions', 3)
class Namespace(_messages.Message):
  r"""A container for services. Namespaces allow administrators to group
  services together and define permissions for a collection of services.

  Messages:
    LabelsValue: Optional. Resource labels associated with this namespace. No
      more than 64 user labels can be associated with a given resource. Label
      keys and values can be no longer than 63 characters.

  Fields:
    attributes: Optional. Attributes associated with this Namespace.
    createTime: Output only. The timestamp when the namespace was created.
    labels: Optional. Resource labels associated with this namespace. No more
      than 64 user labels can be associated with a given resource. Label keys
      and values can be no longer than 63 characters.
    name: Immutable. The resource name for the namespace in the format
      `projects/*/locations/*/namespaces/*`.
    uid: Output only. A globally unique identifier (in UUID4 format) for this
      namespace.
    updateTime: Output only. The timestamp when the namespace was last
      updated.
  """
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Optional. Resource labels associated with this namespace. No more than
    64 user labels can be associated with a given resource. Label keys and
    values can be no longer than 63 characters.

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  attributes = _messages.MessageField('NamespaceAttributes', 1)
  createTime = _messages.StringField(2)
  labels = _messages.MessageField('LabelsValue', 3)
  name = _messages.StringField(4)
  uid = _messages.StringField(5)
  updateTime = _messages.StringField(6)
class NamespaceAttributes(_messages.Message):
  r"""Attributes associated with Namespace.

  Fields:
    cloudDnsManagedZones: Output only. List of Cloud DNS ManagedZones that
      this namespace is associated with.
    managedRegistration: Output only. Indicates whether a GCP product or
      service manages this resource. When a resource is fully managed by
      another GCP product or system the information in Service Directory is
      read-only. The source of truth is the relevant GCP product or system
      which is managing the resource. The Service Directory resource will be
      updated or deleted as appropriate to reflect the state of the underlying
      `origin_resource`. Note: The `origin_resource` can be found in the
      endpoint(s) associated with service(s) associated with this namespace.
  """
  cloudDnsManagedZones = _messages.StringField(1, repeated=True)
  managedRegistration = _messages.BooleanField(2)
class Policy(_messages.Message):
  r"""An Identity and Access Management (IAM) policy, which specifies access
  controls for Google Cloud resources. A `Policy` is a collection of
  `bindings`. A `binding` binds one or more `members`, or principals, to a
  single `role`. Principals can be user accounts, service accounts, Google
  groups, and domains (such as G Suite). A `role` is a named list of
  permissions; each `role` can be an IAM predefined role or a user-created
  custom role. For some types of Google Cloud resources, a `binding` can also
  specify a `condition`, which is a logical expression that allows access to a
  resource only if the expression evaluates to `true`. A condition can add
  constraints based on attributes of the request, the resource, or both. To
  learn which resources support conditions in their IAM policies, see the [IAM
  documentation](https://cloud.google.com/iam/help/conditions/resource-
  policies). **JSON example:** ``` { "bindings": [ { "role":
  "roles/resourcemanager.organizationAdmin", "members": [
  "user:mike@example.com", "group:admins@example.com", "domain:google.com",
  "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role":
  "roles/resourcemanager.organizationViewer", "members": [
  "user:eve@example.com" ], "condition": { "title": "expirable access",
  "description": "Does not grant access after Sep 2020", "expression":
  "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
  "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: -
  members: - user:mike@example.com - group:admins@example.com -
  domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com
  role: roles/resourcemanager.organizationAdmin - members: -
  user:eve@example.com role: roles/resourcemanager.organizationViewer
  condition: title: expirable access description: Does not grant access after
  Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
  etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features,
  see the [IAM documentation](https://cloud.google.com/iam/docs/).

  Fields:
    auditConfigs: Specifies cloud audit logging configuration for this policy.
    bindings: Associates a list of `members`, or principals, with a `role`.
      Optionally, may specify a `condition` that determines how and when the
      `bindings` are applied. Each of the `bindings` must contain at least one
      principal. The `bindings` in a `Policy` can refer to up to 1,500
      principals; up to 250 of these principals can be Google groups. Each
      occurrence of a principal counts towards these limits. For example, if
      the `bindings` grant 50 different roles to `user:alice@example.com`, and
      not to any other principal, then you can add another 1,450 principals to
      the `bindings` in the `Policy`.
    etag: `etag` is used for optimistic concurrency control as a way to help
      prevent simultaneous updates of a policy from overwriting each other. It
      is strongly suggested that systems make use of the `etag` in the read-
      modify-write cycle to perform policy updates in order to avoid race
      conditions: An `etag` is returned in the response to `getIamPolicy`, and
      systems are expected to put that etag in the request to `setIamPolicy`
      to ensure that their change will be applied to the same version of the
      policy. **Important:** If you use IAM Conditions, you must include the
      `etag` field whenever you call `setIamPolicy`. If you omit this field,
      then IAM allows you to overwrite a version `3` policy with a version `1`
      policy, and all of the conditions in the version `3` policy are lost.
    rules: If more than one rule is specified, the rules are applied in the
      following manner: - All matching LOG rules are always applied. - If any
      DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be
      applied if one or more matching rule requires logging. - Otherwise, if
      any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. Logging
      will be applied if one or more matching rule requires logging. -
      Otherwise, if no rule applies, permission is denied.
    version: Specifies the format of the policy. Valid values are `0`, `1`,
      and `3`. Requests that specify an invalid value are rejected. Any
      operation that affects conditional role bindings must specify version
      `3`. This requirement applies to the following operations: * Getting a
      policy that includes a conditional role binding * Adding a conditional
      role binding to a policy * Changing a conditional role binding in a
      policy * Removing any role binding, with or without a condition, from a
      policy that includes conditions **Important:** If you use IAM
      Conditions, you must include the `etag` field whenever you call
      `setIamPolicy`. If you omit this field, then IAM allows you to overwrite
      a version `3` policy with a version `1` policy, and all of the
      conditions in the version `3` policy are lost. If a policy does not
      include any conditions, operations on that policy may specify any valid
      version or leave the field unset. To learn which resources support
      conditions in their IAM policies, see the [IAM
      documentation](https://cloud.google.com/iam/help/conditions/resource-
      policies).
  """
  auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
  bindings = _messages.MessageField('Binding', 2, repeated=True)
  etag = _messages.BytesField(3)
  rules = _messages.MessageField('Rule', 4, repeated=True)
  version = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class ResolveServiceRequest(_messages.Message):
  r"""The request message for LookupService.ResolveService. Looks up a service
  by its name, returns the service and its endpoints.

  Fields:
    endpointFilter: Optional. The filter applied to the endpoints of the
      resolved service. General `filter` string syntax: ` ()` * `` can be
      `name`, `address`, `port`, or `metadata.` for map field * `` can be `<`,
      `>`, `<=`, `>=`, `!=`, `=`, `:`. Of which `:` means `HAS`, and is
      roughly the same as `=` * `` must be the same data type as field * ``
      can be `AND`, `OR`, `NOT` Examples of valid filters: * `metadata.owner`
      returns endpoints that have an annotation with the key `owner`, this is
      the same as `metadata:owner` * `metadata.protocol=gRPC` returns
      endpoints that have key/value `protocol=gRPC` * `address=192.108.1.105`
      returns endpoints that have this address * `port>8080` returns endpoints
      that have port number larger than 8080 * `name>projects/my-
      project/locations/us-east1/namespaces/my-namespace/services/my-
      service/endpoints/endpoint-c` returns endpoints that have name that is
      alphabetically later than the string, so "endpoint-e" is returned but
      "endpoint-a" is not * `name=projects/my-project/locations/us-
      central1/namespaces/my-namespace/services/my-service/endpoints/ep-1`
      returns the endpoint that has an endpoint_id equal to `ep-1` *
      `metadata.owner!=sd AND metadata.foo=bar` returns endpoints that have
      `owner` in annotation key but value is not `sd` AND have key/value
      `foo=bar` * `doesnotexist.foo=bar` returns an empty list. Note that
      endpoint doesn't have a field called "doesnotexist". Since the filter
      does not match any endpoint, it returns no results For more information
      about filtering, see [API Filtering](https://aip.dev/160).
    maxEndpoints: Optional. The maximum number of endpoints to return.
      Defaults to 25. Maximum is 100. If a value less than one is specified,
      the Default is used. If a value greater than the Maximum is specified,
      the Maximum is used.
  """
  endpointFilter = _messages.StringField(1)
  maxEndpoints = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class ResolveServiceResponse(_messages.Message):
  r"""The response message for LookupService.ResolveService.

  Fields:
    service: A Service attribute.
  """
  service = _messages.MessageField('Service', 1)
class Rule(_messages.Message):
  r"""A rule to be applied in a Policy.

  Enums:
    ActionValueValuesEnum: Required

  Fields:
    action: Required
    conditions: Additional restrictions that must be met. All conditions must
      pass for the rule to match.
    description: Human-readable description of the rule.
    in_: If one or more 'in' clauses are specified, the rule matches if the
      PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
    logConfig: The config returned to callers of CheckPolicy for any entries
      that match the LOG action.
    notIn: If one or more 'not_in' clauses are specified, the rule matches if
      the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries. The format
      for in and not_in entries can be found at in the Local IAM documentation
      (see go/local-iam#features).
    permissions: A permission is a string of form '..' (e.g.,
      'storage.buckets.list'). A value of '*' matches all permissions, and a
      verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.
  """
  class ActionValueValuesEnum(_messages.Enum):
    r"""Required

    Values:
      NO_ACTION: Default no action.
      ALLOW: Matching 'Entries' grant access.
      ALLOW_WITH_LOG: Matching 'Entries' grant access and the caller promises
        to log the request per the returned log_configs.
      DENY: Matching 'Entries' deny access.
      DENY_WITH_LOG: Matching 'Entries' deny access and the caller promises to
        log the request per the returned log_configs.
      LOG: Matching 'Entries' tell IAM.Check callers to generate logs.
    """
    NO_ACTION = 0
    ALLOW = 1
    ALLOW_WITH_LOG = 2
    DENY = 3
    DENY_WITH_LOG = 4
    LOG = 5
  action = _messages.EnumField('ActionValueValuesEnum', 1)
  conditions = _messages.MessageField('Condition', 2, repeated=True)
  description = _messages.StringField(3)
  in_ = _messages.StringField(4, repeated=True)
  logConfig = _messages.MessageField('LogConfig', 5, repeated=True)
  notIn = _messages.StringField(6, repeated=True)
  permissions = _messages.StringField(7, repeated=True)
class Service(_messages.Message):
  r"""An individual service. A service contains a name and optional metadata.
  A service must exist before endpoints can be added to it.

  Enums:
    CriticalityValueValuesEnum: Optional. Criticality level of this service.

  Messages:
    MetadataValue: Optional. Metadata for the service. This data can be
      consumed by service clients. Restrictions: * The entire metadata
      dictionary may contain up to 2000 characters, spread across all key-
      value pairs. Metadata that goes beyond this limit are rejected * Valid
      metadata keys have two segments: an optional prefix and name, separated
      by a slash (/). The name segment is required and must be 63 characters
      or less, beginning and ending with an alphanumeric character
      ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and
      alphanumerics between. The prefix is optional. If specified, the prefix
      must be a DNS subdomain: a series of DNS labels separated by dots (.),
      not longer than 253 characters in total, followed by a slash (/).
      Metadata that fails to meet these requirements are rejected Note: This
      field is equivalent to the `annotations` field in the v1 API. They have
      the same syntax and read/write to the same location in Service
      Directory.

  Fields:
    assets: Output only. Assets that are part of this service (output only).
      Example for Google Compute Engine assets: [
      //compute.googleapis.com/projects/1234/regions/us-
      east1/forwardingRules/fr1 ]
    attributes: Optional. Attributes associated with this Service.
    createTime: Output only. The timestamp when the service was created.
    criteria: Optional. Criteria to apply to identify components belonging to
      this service. Only one criteria allowed. Eg. create service representing
      forwarding rule fr1: [ { key: FORWARDING_RULE, value:
      '//compute.googleapis.com/projects/123/zones/us-
      east1-c/forwardingRules/fr1' } ]
    criticality: Optional. Criticality level of this service.
    description: Optional. Human readable explanation of the service and what
      it does.
    displayName: Optional. User-friendly display name for service.
    endpoints: Output only. Endpoints associated with this service. Returned
      on LookupService.ResolveService. Control plane clients should use
      RegistrationService.ListEndpoints.
    environment: Optional. User-friendly string that indicates the environment
      for this service.
    hostname: Optional. Hostname. Service consumer may use for: 1) HTTP
      parameter for Host (HTTP/1.1) or Authority (HTTP/2, HTTP/3) 2) TLS SNI
      Hostname parameter (most commonly used for HTTPS) 3) TLS Hostname
      Authorization against the x509 SAN DNS entries (necessary for HTTPS)
      Example: `service.example.com` Limits: Field limited to 255 ASCII
      characters per https://www.ietf.org/rfc/rfc1035.txt
    metadata: Optional. Metadata for the service. This data can be consumed by
      service clients. Restrictions: * The entire metadata dictionary may
      contain up to 2000 characters, spread across all key-value pairs.
      Metadata that goes beyond this limit are rejected * Valid metadata keys
      have two segments: an optional prefix and name, separated by a slash
      (/). The name segment is required and must be 63 characters or less,
      beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with
      dashes (-), underscores (_), dots (.), and alphanumerics between. The
      prefix is optional. If specified, the prefix must be a DNS subdomain: a
      series of DNS labels separated by dots (.), not longer than 253
      characters in total, followed by a slash (/). Metadata that fails to
      meet these requirements are rejected Note: This field is equivalent to
      the `annotations` field in the v1 API. They have the same syntax and
      read/write to the same location in Service Directory.
    name: Immutable. The resource name for the service in the format
      `projects/*/locations/*/namespaces/*/services/*`.
    owners: Optional. List of contacts for this service. This can include
      application engineers, architects, SRE, ops team, business owners etc.
    serviceIdentities: Optional. Authorized Service Identities. If provided,
      the consumer may use this information to determine whether the service
      provider is authorized. Examples: `spiffe_id:spiffe://example.org/my-
      service` `service_account:my-service@iam.gserviceaccount.com` Limits:
      service_identities list is limited to 10 items.
    uid: Output only. A globally unique identifier (in UUID4 format) for this
      service.
    updateTime: Output only. The timestamp when the service was last updated.
      Note: endpoints being created/deleted/updated within the service are not
      considered service updates for the purpose of this timestamp.
  """
  class CriticalityValueValuesEnum(_messages.Enum):
    r"""Optional. Criticality level of this service.

    Values:
      CRITICALITY_UNSPECIFIED: Default. Resource is not supported and is not
        expected to provide any guarantees.
      MISSION_CRITICAL: The resource is mission-critical to the organization.
      HIGH: The resource may not directly affect the mission of a specific
        unit, but is of high importance to the organization.
      MEDIUM: The resource is of medium importance to the organization.
      LOW: The resource is of low importance to the organization.
    """
    CRITICALITY_UNSPECIFIED = 0
    MISSION_CRITICAL = 1
    HIGH = 2
    MEDIUM = 3
    LOW = 4
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Optional. Metadata for the service. This data can be consumed by
    service clients. Restrictions: * The entire metadata dictionary may
    contain up to 2000 characters, spread across all key-value pairs. Metadata
    that goes beyond this limit are rejected * Valid metadata keys have two
    segments: an optional prefix and name, separated by a slash (/). The name
    segment is required and must be 63 characters or less, beginning and
    ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-),
    underscores (_), dots (.), and alphanumerics between. The prefix is
    optional. If specified, the prefix must be a DNS subdomain: a series of
    DNS labels separated by dots (.), not longer than 253 characters in total,
    followed by a slash (/). Metadata that fails to meet these requirements
    are rejected Note: This field is equivalent to the `annotations` field in
    the v1 API. They have the same syntax and read/write to the same location
    in Service Directory.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Additional properties of type MetadataValue
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  assets = _messages.MessageField('Asset', 1, repeated=True)
  attributes = _messages.MessageField('Attributes', 2)
  createTime = _messages.StringField(3)
  criteria = _messages.MessageField('ServiceCriteria', 4, repeated=True)
  criticality = _messages.EnumField('CriticalityValueValuesEnum', 5)
  description = _messages.StringField(6)
  displayName = _messages.StringField(7)
  endpoints = _messages.MessageField('Endpoint', 8, repeated=True)
  environment = _messages.StringField(9)
  hostname = _messages.StringField(10)
  metadata = _messages.MessageField('MetadataValue', 11)
  name = _messages.StringField(12)
  owners = _messages.MessageField('ContactInfo', 13, repeated=True)
  serviceIdentities = _messages.MessageField('ServiceIdentity', 14, repeated=True)
  uid = _messages.StringField(15)
  updateTime = _messages.StringField(16)
class ServiceCriteria(_messages.Message):
  r"""Criteria to apply to identify components belonging to this service.

  Enums:
    KeyValueValuesEnum: Required. Key for criteria.

  Fields:
    key: Required. Key for criteria.
    value: Required. Criteria value to match against for the associated
      criteria key. Example: //compute.googleapis.com/projects/123/regions/us-
      west1/forwardingRules/fr1
  """
  class KeyValueValuesEnum(_messages.Enum):
    r"""Required. Key for criteria.

    Values:
      CRITERIA_KEY_UNSPECIFIED: Default. Criteria.key is unspecified.
      FORWARDING_RULE: Criteria type of Forwarding Rule. Example value:
        //compute.googleapis.com/projects/123/regions/us-
        west1/forwardingRules/fr1
      GKE_GATEWAY: Criteria type of GKE Gateway. Example value:
        //container.googleapis.com/projects/123/zones/us-
        central1-a/clusters/my-cluster/k8s/apis/gateway.networking.k8s.io/v1al
        pha2/namespaces/default/gateways/my-gateway
      APP_HUB_SERVICE: Criteria type of App Hub service. Example value:
        //servicedirectory.googleapis.com/projects/1234/locations/us-
        west1/namespaces/my-ns/services/gshoe-service
      APP_HUB_WORKLOAD: Criteria type of App Hub workload. Example value:
        //servicedirectory.googleapis.com/projects/1234/locations/us-
        west1/namespaces/my-ns/workloads/gshoe-workload
    """
    CRITERIA_KEY_UNSPECIFIED = 0
    FORWARDING_RULE = 1
    GKE_GATEWAY = 2
    APP_HUB_SERVICE = 3
    APP_HUB_WORKLOAD = 4
  key = _messages.EnumField('KeyValueValuesEnum', 1)
  value = _messages.StringField(2)
class ServiceIdentity(_messages.Message):
  r"""Specifies the Service Identity of the authorized server.

  Fields:
    serviceAccount: service_account: gcp service account, usable with ALTS.
      Example: `my-service@iam.gserviceaccount.com` Limits: Limited to 320
      characters. https://tools.ietf.org/html/rfc3696
    spiffeId: spiffe_id as works with x509 certs with Subject Alternative Name
      (SAN) specified as uniformResourceIdentifier:*spiffe_id* Example:
      `spiffe://example.org/my-service` Limits: 2048 ASCII Characters
      https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-
      ID.md#23-maximum-spiffe-id-length
  """
  serviceAccount = _messages.StringField(1)
  spiffeId = _messages.StringField(2)
class ServicedirectoryProjectsLocationsGetRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsGetRequest object.

  Fields:
    name: Resource name for the location.
  """
  name = _messages.StringField(1, required=True)
class ServicedirectoryProjectsLocationsListRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsListRequest object.

  Fields:
    filter: A filter to narrow down results to a preferred subset. The
      filtering language accepts strings like `"displayName=tokyo"`, and is
      documented in more detail in [AIP-160](https://google.aip.dev/160).
    includeUnrevealedLocations: If true, the returned list will include
      locations which are not yet revealed.
    name: The resource that owns the locations collection, if applicable.
    pageSize: The maximum number of results to return. If not set, the service
      selects a default.
    pageToken: A page token received from the `next_page_token` field in the
      response. Send that page token to receive the subsequent page.
  """
  filter = _messages.StringField(1)
  includeUnrevealedLocations = _messages.BooleanField(2)
  name = _messages.StringField(3, required=True)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)
class ServicedirectoryProjectsLocationsNamespacesCreateRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesCreateRequest object.

  Fields:
    namespace: A Namespace resource to be passed as the request body.
    namespaceId: Required. The Resource ID must be 1-63 characters long, and
      comply with RFC1035. Specifically, the name must be 1-63 characters long
      and match the regular expression `[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?`
      which means the first character must be a lowercase letter, and all
      following characters must be a dash, lowercase letter, or digit, except
      the last character, which cannot be a dash.
    parent: Required. The resource name of the project and location the
      namespace will be created in.
  """
  namespace = _messages.MessageField('Namespace', 1)
  namespaceId = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
class ServicedirectoryProjectsLocationsNamespacesDeleteRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesDeleteRequest object.

  Fields:
    name: Required. The name of the namespace to delete.
  """
  name = _messages.StringField(1, required=True)
class ServicedirectoryProjectsLocationsNamespacesGetIamPolicyRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
  """
  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)
class ServicedirectoryProjectsLocationsNamespacesGetRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesGetRequest object.

  Fields:
    name: Required. The name of the namespace to retrieve.
  """
  name = _messages.StringField(1, required=True)
class ServicedirectoryProjectsLocationsNamespacesListRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesListRequest object.

  Fields:
    filter: Optional. The filter to list results by. General `filter` string
      syntax: ` ()` * `` can be `name`, `labels.` for map field, or
      `attributes.` for attributes field * `` can be `<`, `>`, `<=`, `>=`,
      `!=`, `=`, `:`. Of which `:` means `HAS`, and is roughly the same as `=`
      * `` must be the same data type as field * `` can be `AND`, `OR`, `NOT`
      Examples of valid filters: * `labels.owner` returns namespaces that have
      a label with the key `owner`, this is the same as `labels:owner` *
      `labels.owner=sd` returns namespaces that have key/value `owner=sd` *
      `name>projects/my-project/locations/us-east1/namespaces/namespace-c`
      returns namespaces that have name that is alphabetically later than the
      string, so "namespace-e" is returned but "namespace-a" is not *
      `labels.owner!=sd AND labels.foo=bar` returns namespaces that have
      `owner` in label key but value is not `sd` AND have key/value `foo=bar`
      * `doesnotexist.foo=bar` returns an empty list. Note that namespace
      doesn't have a field called "doesnotexist". Since the filter does not
      match any namespaces, it returns no results *
      `attributes.managed_registration=true` returns namespaces that are
      managed by a GCP product or service For more information about
      filtering, see [API Filtering](https://aip.dev/160).
    orderBy: Optional. The order to list results by. General `order_by` string
      syntax: ` () (,)` * `` allows value: `name` * `` ascending or descending
      order by ``. If this is left blank, `asc` is used Note that an empty
      `order_by` string results in default order, which is order by `name` in
      ascending order.
    pageSize: Optional. The maximum number of items to return.
    pageToken: Optional. The next_page_token value returned from a previous
      List request, if any.
    parent: Required. The resource name of the project and location whose
      namespaces you'd like to list.
  """
  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)
class ServicedirectoryProjectsLocationsNamespacesPatchRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesPatchRequest object.

  Fields:
    name: Immutable. The resource name for the namespace in the format
      `projects/*/locations/*/namespaces/*`.
    namespace: A Namespace resource to be passed as the request body.
    updateMask: Required. List of fields to be updated in this request.
  """
  name = _messages.StringField(1, required=True)
  namespace = _messages.MessageField('Namespace', 2)
  updateMask = _messages.StringField(3)
class ServicedirectoryProjectsLocationsNamespacesServicesCreateRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesCreateRequest
  object.

  Fields:
    parent: Required. The resource name of the namespace this service will
      belong to.
    service: A Service resource to be passed as the request body.
    serviceId: Required. The Resource ID must be 1-63 characters long, and
      comply with RFC1035. Specifically, the name must be 1-63 characters long
      and match the regular expression `[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?`
      which means the first character must be a lowercase letter, and all
      following characters must be a dash, lowercase letter, or digit, except
      the last character, which cannot be a dash.
  """
  parent = _messages.StringField(1, required=True)
  service = _messages.MessageField('Service', 2)
  serviceId = _messages.StringField(3)
class ServicedirectoryProjectsLocationsNamespacesServicesDeleteRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesDeleteRequest
  object.

  Fields:
    name: Required. The name of the service to delete.
  """
  name = _messages.StringField(1, required=True)
class ServicedirectoryProjectsLocationsNamespacesServicesEndpointsCreateRequest(_messages.Message):
  r"""A
  ServicedirectoryProjectsLocationsNamespacesServicesEndpointsCreateRequest
  object.

  Fields:
    endpoint: An Endpoint resource to be passed as the request body.
    endpointId: Required. The Resource ID must be 1-63 characters long, and
      comply with RFC1035. Specifically, the name must be 1-63 characters long
      and match the regular expression `[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?`
      which means the first character must be a lowercase letter, and all
      following characters must be a dash, lowercase letter, or digit, except
      the last character, which cannot be a dash.
    parent: Required. The resource name of the service that this endpoint
      provides.
  """
  endpoint = _messages.MessageField('Endpoint', 1)
  endpointId = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
class ServicedirectoryProjectsLocationsNamespacesServicesEndpointsDeleteRequest(_messages.Message):
  r"""A
  ServicedirectoryProjectsLocationsNamespacesServicesEndpointsDeleteRequest
  object.

  Fields:
    name: Required. The name of the endpoint to delete.
  """
  name = _messages.StringField(1, required=True)
class ServicedirectoryProjectsLocationsNamespacesServicesEndpointsGetRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesEndpointsGetRequest
  object.

  Fields:
    name: Required. The name of the endpoint to get.
  """
  name = _messages.StringField(1, required=True)
class ServicedirectoryProjectsLocationsNamespacesServicesEndpointsListRequest(_messages.Message):
  r"""A
  ServicedirectoryProjectsLocationsNamespacesServicesEndpointsListRequest
  object.

  Fields:
    filter: Optional. The filter to list results by. General `filter` string
      syntax: ` ()` * `` can be `name`, `address`, `port`, `metadata.` for map
      field, or `attributes.` for attributes field * `` can be `<`, `>`, `<=`,
      `>=`, `!=`, `=`, `:`. Of which `:` means `HAS`, and is roughly the same
      as `=` * `` must be the same data type as field * `` can be `AND`, `OR`,
      `NOT` Examples of valid filters: * `metadata.owner` returns endpoints
      that have a metadata with the key `owner`, this is the same as
      `metadata:owner` * `metadata.protocol=gRPC` returns endpoints that have
      key/value `protocol=gRPC` * `address=192.108.1.105` returns endpoints
      that have this address * `port>8080` returns endpoints that have port
      number larger than 8080 * `name>projects/my-project/locations/us-
      east1/namespaces/my-namespace/services/my-service/endpoints/endpoint-c`
      returns endpoints that have name that is alphabetically later than the
      string, so "endpoint-e" is returned but "endpoint-a" is not *
      `metadata.owner!=sd AND metadata.foo=bar` returns endpoints that have
      `owner` in metadata key but value is not `sd` AND have key/value
      `foo=bar` * `doesnotexist.foo=bar` returns an empty list. Note that
      endpoint doesn't have a field called "doesnotexist". Since the filter
      does not match any endpoints, it returns no results *
      `attributes.kubernetes_resource_type=KUBERNETES_RESOURCE_TYPE_CLUSTER_
      IP` returns endpoints with the corresponding kubernetes_resource_type
      For more information about filtering, see [API
      Filtering](https://aip.dev/160).
    orderBy: Optional. The order to list results by. General `order_by` string
      syntax: ` () (,)` * `` allows values: `name`, `address`, `port` * ``
      ascending or descending order by ``. If this is left blank, `asc` is
      used Note that an empty `order_by` string results in default order,
      which is order by `name` in ascending order.
    pageSize: Optional. The maximum number of items to return.
    pageToken: Optional. The next_page_token value returned from a previous
      List request, if any.
    parent: Required. The resource name of the service whose endpoints you'd
      like to list.
  """
  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)
class ServicedirectoryProjectsLocationsNamespacesServicesEndpointsPatchRequest(_messages.Message):
  r"""A
  ServicedirectoryProjectsLocationsNamespacesServicesEndpointsPatchRequest
  object.

  Fields:
    endpoint: An Endpoint resource to be passed as the request body.
    name: Immutable. The resource name for the endpoint in the format
      `projects/*/locations/*/namespaces/*/services/*/endpoints/*`.
    updateMask: Required. List of fields to be updated in this request.
  """
  endpoint = _messages.MessageField('Endpoint', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class ServicedirectoryProjectsLocationsNamespacesServicesGetIamPolicyRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesGetIamPolicyRequest
  object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
  """
  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)
class ServicedirectoryProjectsLocationsNamespacesServicesGetRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesGetRequest object.

  Fields:
    name: Required. The name of the service to get.
  """
  name = _messages.StringField(1, required=True)
class ServicedirectoryProjectsLocationsNamespacesServicesListRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesListRequest object.

  Fields:
    filter: Optional. The filter to list results by. General `filter` string
      syntax: ` ()` * `` can be `name` or `metadata.` for map field * `` can
      be `<`, `>`, `<=`, `>=`, `!=`, `=`, `:`. Of which `:` means `HAS`, and
      is roughly the same as `=` * `` must be the same data type as field * ``
      can be `AND`, `OR`, `NOT` Examples of valid filters: * `metadata.owner`
      returns services that have a metadata with the key `owner`, this is the
      same as `metadata:owner` * `metadata.protocol=gRPC` returns services
      that have key/value `protocol=gRPC` * `name>projects/my-
      project/locations/us-east1/namespaces/my-namespace/services/service-c`
      returns services that have name that is alphabetically later than the
      string, so "service-e" is returned but "service-a" is not *
      `metadata.owner!=sd AND metadata.foo=bar` returns services that have
      `owner` in metadata key but value is not `sd` AND have key/value
      `foo=bar` * `doesnotexist.foo=bar` returns an empty list. Note that
      service doesn't have a field called "doesnotexist". Since the filter
      does not match any services, it returns no results *
      `attributes.managed_registration=true` returns services that are managed
      by a GCP product or service For more information about filtering, see
      [API Filtering](https://aip.dev/160).
    orderBy: Optional. The order to list results by. General `order_by` string
      syntax: ` () (,)` * `` allows value: `name` * `` ascending or descending
      order by ``. If this is left blank, `asc` is used Note that an empty
      `order_by` string results in default order, which is order by `name` in
      ascending order.
    pageSize: Optional. The maximum number of items to return.
    pageToken: Optional. The next_page_token value returned from a previous
      List request, if any.
    parent: Required. The resource name of the namespace whose services you'd
      like to list.
  """
  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)
class ServicedirectoryProjectsLocationsNamespacesServicesPatchRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesPatchRequest
  object.

  Fields:
    name: Immutable. The resource name for the service in the format
      `projects/*/locations/*/namespaces/*/services/*`.
    service: A Service resource to be passed as the request body.
    updateMask: Required. List of fields to be updated in this request.
  """
  name = _messages.StringField(1, required=True)
  service = _messages.MessageField('Service', 2)
  updateMask = _messages.StringField(3)
class ServicedirectoryProjectsLocationsNamespacesServicesResolveRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesResolveRequest
  object.

  Fields:
    name: Required. The name of the service to resolve.
    resolveServiceRequest: A ResolveServiceRequest resource to be passed as
      the request body.
  """
  name = _messages.StringField(1, required=True)
  resolveServiceRequest = _messages.MessageField('ResolveServiceRequest', 2)
class ServicedirectoryProjectsLocationsNamespacesServicesSetIamPolicyRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesServicesSetIamPolicyRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """
  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class ServicedirectoryProjectsLocationsNamespacesServicesTestIamPermissionsRequest(_messages.Message):
  r"""A
  ServicedirectoryProjectsLocationsNamespacesServicesTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """
  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class ServicedirectoryProjectsLocationsNamespacesSetIamPolicyRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """
  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class ServicedirectoryProjectsLocationsNamespacesTestIamPermissionsRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """
  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class ServicedirectoryProjectsLocationsNamespacesWorkloadsCreateRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesWorkloadsCreateRequest
  object.

  Fields:
    parent: Required. The resource name of the namespace this service workload
      will belong to.
    workload: A Workload resource to be passed as the request body.
    workloadId: Required. The Resource ID must be 1-63 characters long, and
      comply with [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
      Specifically, the name must be 1-63 characters long and match the
      regular expression `[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?` which means the
      first character must be a lowercase letter, and all following characters
      must be a dash, lowercase letter, or digit, except the last character,
      which cannot be a dash.
  """
  parent = _messages.StringField(1, required=True)
  workload = _messages.MessageField('Workload', 2)
  workloadId = _messages.StringField(3)
class ServicedirectoryProjectsLocationsNamespacesWorkloadsDeleteRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesWorkloadsDeleteRequest
  object.

  Enums:
    ManagerTypeValueValuesEnum: Stores extra information about what Google
      resource is directly responsible for a given Workload resource.

  Fields:
    managerType: Stores extra information about what Google resource is
      directly responsible for a given Workload resource.
    name: Required. The name of the workload to delete.
  """
  class ManagerTypeValueValuesEnum(_messages.Enum):
    r"""Stores extra information about what Google resource is directly
    responsible for a given Workload resource.

    Values:
      TYPE_UNSPECIFIED: Default. Should not be used.
      GKE_HUB: Resource managed by GKE Hub.
      BACKEND_SERVICE: Resource managed by Arcus, Backend Service
    """
    TYPE_UNSPECIFIED = 0
    GKE_HUB = 1
    BACKEND_SERVICE = 2
  managerType = _messages.EnumField('ManagerTypeValueValuesEnum', 1)
  name = _messages.StringField(2, required=True)
class ServicedirectoryProjectsLocationsNamespacesWorkloadsGetIamPolicyRequest(_messages.Message):
  r"""A
  ServicedirectoryProjectsLocationsNamespacesWorkloadsGetIamPolicyRequest
  object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
  """
  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)
class ServicedirectoryProjectsLocationsNamespacesWorkloadsGetRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesWorkloadsGetRequest object.

  Fields:
    name: Required. The name of the service workload to get.
  """
  name = _messages.StringField(1, required=True)
class ServicedirectoryProjectsLocationsNamespacesWorkloadsListRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesWorkloadsListRequest
  object.

  Fields:
    filter: Optional. The filter to list results by. General `filter` string
      syntax: ` ()` * `` can be any field name on the Workload proto. For
      example: `name`, `create_time`, `annotations.`, or `components` * `` can
      be `<`, `>`, `<=`, `>=`, `!=`, `=`, `:`. Of which `:` means `HAS`, and
      is roughly the same as `=` * `` must be the same data type as field * ``
      can be `AND`, `OR`, `NOT` Examples of valid filters: *
      `annotations.owner` returns workloads that have an annotation with the
      key `owner`, this is the same as `annotations:owner` *
      `components://compute.googleapis.com/projects/1234/zones/us-
      east1-c/instances/mig1` returns workloads that contain the specified
      component. * `name>projects/my-project/locations/us-east1/namespaces/my-
      namespace/workloads/workload-c` returns workloads that have names that
      are alphabetically later than the string, so "workload-e" is returned
      but "workload-a" is not * `annotations.owner!=sd AND
      annotations.foo=bar` returns workloads that have `owner` in annotation
      key but value is not `sd` AND have key/value `foo=bar` *
      `doesnotexist.foo=bar` returns an empty list. Note that workload doesn't
      have a field called "doesnotexist". Since the filter does not match any
      workloads, it returns no results For more information about filtering,
      see [API Filtering](https://aip.dev/160).
    orderBy: Optional. The order to list results by. General `order_by` string
      syntax: ` () (,)` * `` allows values: `name`, `display_name`,
      `create_time`, `update_time` * `` ascending or descending order by ``.
      If this is left blank, `asc` is used Note that an empty `order_by`
      string results in default order, which is order by `name` in ascending
      order.
    pageSize: Optional. The maximum number of items to return.
    pageToken: Optional. The next_page_token value returned from a previous
      List request, if any.
    parent: Required. The resource name of the namespace whose service
      workloads you'd like to list.
  """
  filter = _messages.StringField(1)
  orderBy = _messages.StringField(2)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
  parent = _messages.StringField(5, required=True)
class ServicedirectoryProjectsLocationsNamespacesWorkloadsPatchRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesWorkloadsPatchRequest
  object.

  Fields:
    name: Immutable. The resource name for the workload in the format
      `projects/*/locations/*/namespaces/*/workloads/*`.
    updateMask: Required. List of fields to be updated in this request.
      Allowable fields: `display_name`, `annotations`. -- Internal
      integrations may update other fields
    workload: A Workload resource to be passed as the request body.
  """
  name = _messages.StringField(1, required=True)
  updateMask = _messages.StringField(2)
  workload = _messages.MessageField('Workload', 3)
# Generated-style protorpc request wrapper for SetIamPolicy on a workload.
class ServicedirectoryProjectsLocationsNamespacesWorkloadsSetIamPolicyRequest(_messages.Message):
  r"""A
  ServicedirectoryProjectsLocationsNamespacesWorkloadsSetIamPolicyRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
# Generated-style protorpc request wrapper for TestIamPermissions on a
# workload.
class ServicedirectoryProjectsLocationsNamespacesWorkloadsTestIamPermissionsRequest(_messages.Message):
  r"""A ServicedirectoryProjectsLocationsNamespacesWorkloadsTestIamPermissions
  Request object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See [Resource
      names](https://cloud.google.com/apis/design/resource_names) for the
      appropriate value for this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
# Shared request body message for the `SetIamPolicy` RPC.
class SetIamPolicyRequest(_messages.Message):
  r"""Request message for `SetIamPolicy` method.

  Fields:
    policy: REQUIRED: The complete policy to be applied to the `resource`.
      The size of the policy is limited to a few 10s of KB. An empty policy
      is a valid policy but certain Google Cloud services (such as Projects)
      might reject them.
    updateMask: OPTIONAL: A FieldMask specifying which fields of the policy
      to modify. Only the fields in the mask will be modified. If no mask is
      provided, the following default mask is used: `paths: "bindings, etag"`
  """

  policy = _messages.MessageField('Policy', 1)
  updateMask = _messages.StringField(2)
# Standard Google API query parameters shared by every method of the service.
# The `f__xgafv` / `_1` / `_2` spellings are Python-safe aliases for wire
# names that are not valid identifiers (see the encoding mappings at the
# bottom of this module).
class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial
      response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth
      2.0 token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but
      should not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)
# Shared request body message for the `TestIamPermissions` RPC.
class TestIamPermissionsRequest(_messages.Message):
  r"""Request message for `TestIamPermissions` method.

  Fields:
    permissions: The set of permissions to check for the `resource`.
      Permissions with wildcards (such as `*` or `storage.*`) are not
      allowed. For more information see [IAM
      Overview](https://cloud.google.com/iam/docs/overview#permissions).
  """

  permissions = _messages.StringField(1, repeated=True)
# Shared response body message for the `TestIamPermissions` RPC.
class TestIamPermissionsResponse(_messages.Message):
  r"""Response message for `TestIamPermissions` method.

  Fields:
    permissions: A subset of `TestPermissionsRequest.permissions` that the
      caller is allowed.
  """

  permissions = _messages.StringField(1, repeated=True)
# Core resource message for a Service Directory Workload. Field numbers are
# part of the wire format; never renumber them.
class Workload(_messages.Message):
  r"""An individual Workload. A logical collection of assets that provide the
  same functionality, with a common set of core attributes, that power
  services in Service Directory and to which policies can be applied.

  Enums:
    CriticalityValueValuesEnum: Optional. Criticality of this workload.

  Fields:
    assets: Output only. Assets that are part of this workload (output only).
      Example for Google Compute Engine assets: [
      //compute.googleapis.com/projects/1234/zones/us-
      east1-c/instanceGroups/mig1,
      //compute.googleapis.com/projects/1234/zones/us-
      east1-a/instanceGroups/mig2 ]
    createTime: Output only. The timestamp when this workload was created in
      Service Directory.
    criteria: Optional. Criteria to apply to identify assets belonging to
      this workload. Used to auto-populate the assets fields. Repeated list
      of tuples of . Multiple values are treated as OR expression, and assets
      matching any of the entries will be selected. Eg. select all resources
      of workloadType behind backend service bs1: [ { key: BACKEND_SERVICE,
      value: '//compute.googleapis.com/projects/123/zones/us-
      east1-c/backendServices/bs1' } ] Eg. select all resources of
      workloadType behind backend services in bs1 or bs2: [ { key:
      BACKEND_SERVICE, value: '//compute.googleapis.com/projects/123/zones/us-
      east1-c/backendServices/bs1' }, { key: BACKEND_SERVICE, value:
      '//compute.googleapis.com/projects/123/regions/us-
      east1/backendServices/bs2' }, ] Eg. select resources explicitly by name
      to be part of the workload: [ { key: INSTANCE_GROUP, value:
      '//compute.googleapis.com/projects/1234/zones/us-
      east1-c/instanceGroups/mig1' }, { key: INSTANCE_GROUP, value:
      '//compute.googleapis.com/projects/1234/regions/us-
      east1/instanceGroups/mig2' } ]
    criticality: Optional. Criticality of this workload.
    description: Optional. Human readable explanation of the workload and
      what it does.
    displayName: Optional. Friendly name. User modifiable.
    environment: Optional. User-friendly string that indicates the
      environment for this workload.
    internalAttributes: Optional. Internal Attributes associated with this
      workload. This field should stay GOOGLE_INTERNAL post launch.
    name: Immutable. The resource name for the workload in the format
      `projects/*/locations/*/namespaces/*/workloads/*`.
    owners: Optional. List of contacts for this workload. This can include
      application engineers, architects, SRE, ops team, business owners etc.
    uid: Output only. A globally unique identifier (in UUID4 format) for
      this workload.
    updateTime: Output only. The timestamp when the workload was last
      updated in Service Directory.
  """

  class CriticalityValueValuesEnum(_messages.Enum):
    r"""Optional. Criticality of this workload.

    Values:
      CRITICALITY_UNSPECIFIED: Default. Resource is not supported and is not
        expected to provide any guarantees.
      MISSION_CRITICAL: The resource is mission-critical to the organization.
      HIGH: The resource may not directly affect the mission of a specific
        unit, but is of high importance to the organization.
      MEDIUM: The resource is of medium importance to the organization.
      LOW: The resource is of low importance to the organization.
    """
    CRITICALITY_UNSPECIFIED = 0
    MISSION_CRITICAL = 1
    HIGH = 2
    MEDIUM = 3
    LOW = 4

  assets = _messages.MessageField('Asset', 1, repeated=True)
  createTime = _messages.StringField(2)
  criteria = _messages.MessageField('WorkloadCriteria', 3, repeated=True)
  criticality = _messages.EnumField('CriticalityValueValuesEnum', 4)
  description = _messages.StringField(5)
  displayName = _messages.StringField(6)
  environment = _messages.StringField(7)
  internalAttributes = _messages.MessageField('InternalAttributes', 8)
  name = _messages.StringField(9)
  owners = _messages.MessageField('ContactInfo', 10, repeated=True)
  uid = _messages.StringField(11)
  updateTime = _messages.StringField(12)
# Key/value selector used by Workload.criteria to match assets.
class WorkloadCriteria(_messages.Message):
  r"""Criteria to apply to identify assets belonging to this workload. Used
  to auto-populate the assets field.

  Enums:
    KeyValueValuesEnum: Required. Key for criteria.

  Fields:
    key: Required. Key for criteria.
    value: Required. Criteria value to match against for the associated
      criteria key. Example: //compute.googleapis.com/projects/123/regions/us-
      west1/backendServices/bs1
  """

  class KeyValueValuesEnum(_messages.Enum):
    r"""Required. Key for criteria.

    Values:
      CRITERIA_KEY_UNSPECIFIED: Default. Criteria.key is unspecified.
      INSTANCE_GROUP: The criteria key is Instance Group.
      BACKEND_SERVICE: The criteria key is Backend Service.
    """
    CRITERIA_KEY_UNSPECIFIED = 0
    INSTANCE_GROUP = 1
    BACKEND_SERVICE = 2

  key = _messages.EnumField('KeyValueValuesEnum', 1)
  value = _messages.StringField(2)
# Map Python-safe attribute names back to their JSON wire names: protorpc
# fields cannot be named with reserved words ('in') or names starting with
# '$' or a digit. (`Rule` is defined earlier in this module.)
encoding.AddCustomJsonFieldMapping(
    Rule, 'in_', 'in')
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
db4a5b1057320eaf972ba83b5616f4e54d71cc19 | 99ed889f5d679f0712a9578435819ff9fe1038e9 | /baselines_tactile/ppo2/test_microbatches.py | c758fb8f5ba3adf9123d5c59631875cee4554f44 | [] | no_license | WMViolet/baselines_tactile | 7e7800c44167d6e29f4f4a187e49d92462f49100 | 761193122ff8c914d8b983d93620a7ffc63ea917 | refs/heads/main | 2023-02-24T00:30:04.616016 | 2021-02-01T23:45:53 | 2021-02-01T23:45:53 | 322,393,115 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | from envs import gym
import tensorflow as tf
import numpy as np
from functools import partial
from tactile_baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from tactile_baselines.common.tf_util import make_session
from tactile_baselines.ppo2.ppo2 import learn
from tactile_baselines.ppo2.microbatched_model import MicrobatchedModel
def test_microbatches():
    """Regression test: training with microbatched gradient accumulation must
    reproduce (within tolerance) the variables of a plain training run."""

    def make_env():
        cartpole = gym.make('CartPole-v0')
        cartpole.seed(0)
        return cartpole

    train = partial(learn, network='mlp', nsteps=32, total_timesteps=32, seed=0)

    # Reference run: ordinary model, full-batch gradient updates.
    reference_env = DummyVecEnv([make_env])
    reference_sess = make_session(make_default=True, graph=tf.Graph())
    train(env=reference_env)
    reference_vars = {}
    for var in tf.trainable_variables():
        reference_vars[var.name] = reference_sess.run(var)

    # Candidate run: identical setup, but gradients accumulated over
    # microbatches of size 2.
    candidate_env = DummyVecEnv([make_env])
    candidate_sess = make_session(make_default=True, graph=tf.Graph())
    train(env=candidate_env,
          model_fn=partial(MicrobatchedModel, microbatch_size=2))
    candidate_vars = {}
    for var in tf.trainable_variables():
        candidate_vars[var.name] = candidate_sess.run(var)

    for name in reference_vars:
        np.testing.assert_allclose(reference_vars[name], candidate_vars[name],
                                   atol=3e-3)
# Allow running this regression test directly as a script.
if __name__ == '__main__':
    test_microbatches()
| [
"violetfuyao@berkeley.edu"
] | violetfuyao@berkeley.edu |
6eaff47ed53d499b32e238b5a7097c89a1bc0175 | f6641c552622e1446d913d50f561ff14c524e885 | /data/box_data_bidirect.py | 76bae58334dd110d98ed895f37fc04711a480921 | [] | no_license | yangyi02/video_motion_synthetic3 | 939d1ddd3a4caada87e0e2ef3ed430dae9b2447e | e732d3641c555422b977648211683cb21186bcdb | refs/heads/master | 2021-01-01T06:46:55.553125 | 2017-08-04T00:19:28 | 2017-08-04T00:19:28 | 97,509,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | import numpy
from synthetic_data_bidirect import SyntheticDataBidirect
import learning_args
import logging
# Module-wide log format: level, timestamp and source location per record.
logging.basicConfig(format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s',
                    level=logging.INFO)
class BoxDataBidirect(SyntheticDataBidirect):
    """Synthetic bidirectional-motion dataset whose source frames contain
    randomly placed, randomly colored axis-aligned rectangles ("boxes")."""

    def __init__(self, args):
        super(BoxDataBidirect, self).__init__(args)
        # Amplitudes of the additive noise on foreground boxes / background.
        self.fg_noise = args.fg_noise
        self.bg_noise = args.bg_noise
        self.train_images, self.test_images = None, None

    def generate_source_image(self):
        """Draw one random box per (object, batch) slot.

        Returns:
            im: (num_objects, batch, 3, H, W) array of box images.
            mask: (num_objects, batch, 1, H, W) layer labels; earlier objects
                receive larger labels so they occlude later ones.
        """
        n_obj, n_batch, size = self.num_objects, self.batch_size, self.im_size
        im = numpy.zeros((n_obj, n_batch, 3, size, size))
        mask = numpy.zeros((n_obj, n_batch, 1, size, size))
        for obj in range(n_obj):
            for b in range(n_batch):
                # Random box geometry, kept fully inside the image.
                w = numpy.random.randint(size / 8, size * 3 / 4)
                h = numpy.random.randint(size / 8, size * 3 / 4)
                left = numpy.random.randint(0, size - w)
                top = numpy.random.randint(0, size - h)
                # Base color leaves headroom for the additive noise below.
                color = numpy.random.uniform(self.bg_noise, 1 - self.fg_noise, 3)
                box = im[obj, b, :, top:top + h, left:left + w]  # view into im
                for c in range(3):
                    box[c, :, :] = color[c]
                box += numpy.random.rand(3, h, w) * self.fg_noise
                mask[obj, b, 0, top:top + h, left:left + w] = n_obj - obj
        return im, mask

    def get_next_batch(self, images=None):
        """Generate one batch of synthetic sequences from fresh source boxes."""
        source_im, source_mask = self.generate_source_image()
        (im, motion, motion_r, motion_label, motion_label_r,
         seg_layer) = self.generate_data(source_im, source_mask)
        return im, motion, motion_r, motion_label, motion_label_r, seg_layer
def unit_test():
    """Smoke test: build the dataset from CLI args and display one batch."""
    args = learning_args.parse_args()
    logging.info(args)
    dataset = BoxDataBidirect(args)
    batch = dataset.get_next_batch()
    im, motion, motion_r = batch[0], batch[1], batch[2]
    seg_layer = batch[5]
    dataset.display(im, motion, motion_r, seg_layer)
# Allow running the smoke test directly as a script.
if __name__ == '__main__':
    unit_test()
| [
"yangyi02@gmail.com"
] | yangyi02@gmail.com |
adb66b38b38c519f92faf97c07eefe21491c1f8b | bf0c13d412a7021b299c5e0622e63e72172cf725 | /week4/todolist/api/migrations/0002_task_created_by.py | c072e72d50a61591cd796376074211e17682bbbe | [] | no_license | Alibek120699/BFDjango | 765e734e925041947f607a1d15228309dfa3e647 | eac06c317551c561ffccb44750862972ae11dea3 | refs/heads/master | 2022-12-01T15:49:39.402815 | 2020-04-19T21:09:39 | 2020-04-19T21:09:39 | 233,657,360 | 0 | 0 | null | 2022-11-22T05:49:56 | 2020-01-13T17:50:13 | Python | UTF-8 | Python | false | false | 593 | py | # Generated by Django 2.2 on 2020-02-16 13:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('api', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='task',
name='created_by',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"sayakalibek1@gmail.com"
] | sayakalibek1@gmail.com |
8a56d384d8f53a7a0624ccda96ddcebda8177931 | acf8ce66cc5335b7a1ce98887949bee724d98d9e | /stu_and_extend/extend_info/views.py | e751667727978882e693199a847ea95679efd8f4 | [] | no_license | iversongit/20180426 | eec4f56612cd775be179783932fee19cd9f59096 | b5af588eff11940fff9222e9662ca3f49442ab10 | refs/heads/master | 2020-03-13T10:15:20.061654 | 2018-04-27T02:19:00 | 2018-04-27T02:19:00 | 131,079,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | from django.http import HttpResponse
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
# Create your views here.
from extend_info.models import Extend
from student.models import Student
def addExtendInfo(request, s_id):
    """Create an Extend (extended student info) record for student `s_id`.

    POST stores the submitted fields and redirects to the overview page;
    GET — and any other method such as HEAD — renders the entry form.
    (The original returned None implicitly for methods other than GET/POST,
    which Django rejects with "view didn't return an HttpResponse".)
    """
    if request.method == "POST":
        Extend.objects.create(
            e_addr=request.POST.get("e_addr"),
            e_tel=request.POST.get("e_tel"),
            e_birth=request.POST.get("e_birth"),
            e_des=request.POST.get("e_des"),
            s_id=s_id,
        )
        return HttpResponseRedirect("/extendapp/showall")
    # Fall through for every non-POST method instead of returning None.
    return render(request, "addExtendInfo.html", {'s_id': s_id})
def showAllInfo(request):
    """Render every student together with all extended-info records."""
    context = {
        'stus': Student.objects.all(),
        'extinfos': Extend.objects.all(),
    }
    return render(request, "showAllInfo.html", context)
| [
"1564329410@qq.com"
] | 1564329410@qq.com |
d4fba6a899bdae2ae0774c15e23236244c0e3ec1 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/LArCalorimeter/LArExample/LArCalibProcessing/share/LArCalib_Example_HVCorr.py | e4330dd17032ade9800b69f61ce3b19e7ff0e949 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,059 | py | #################################################################
#
# example jobOptions to read HV from Cool/DCS in athena
# and compute cell level correction factors to store in conditions
# database
#
##################################################################
from time import strptime,time
from calendar import timegm
#set date to compute the correction
if "date" not in dir():
date="2013-02-06:09:45:00"
if "TimeStamp" not in dir():
try:
ts=strptime(date+'/UTC','%Y-%m-%d:%H:%M:%S/%Z')
TimeStamp=int(timegm(ts))*1000000000L
except ValueError:
print "ERROR in time specification, use e.g. 2007-05-25:14:01:00"
from LArCalibProcessing.TimeStampToRunLumi import TimeStampToRunLumi
rlb=TimeStampToRunLumi(TimeStamp,dbInstance="CONDBR2")
if rlb is None:
print "WARNING: Failed to convert time",TimeStamp,"into a run/lumi number"
RunNumber=999999
LumiBlock=0
else:
RunNumber=rlb[0]
LumiBlock=rlb[1]
print "---> Working on run",RunNumber,"LB",LumiBlock,"Timestamp:",TimeStamp
timediff=int(time()-(TimeStamp/1000000000L))
if timediff<0:
print "ERROR: Timestamp in the future???"
else:
(days,remainder)=divmod(timediff,24*60*60)
(hours,seconds)=divmod(remainder,60*60)
print "---> Timestamp is %i days %i hours and %i minutes ago" % (days,hours,int(seconds/60))
pass
# name of output local sql file
OutputSQLiteFile = 'HVScaleCorr.db'
# name of output Pool file
PoolFileName = "dummy.pool.root"
# database folder
LArHVScaleCorrFolder = "/LAR/ElecCalibFlat/HVScaleCorr"
# output key
keyOutput = "LArHVScaleCorr"
# tag suffix
#LArCalibFolderOutputTag = "-UPD3-00"
# write IOV
WriteIOV = True
# global tag to read other conditions if needed
if "GlobalTag" not in dir():
GlobalTag = 'LARCALIB-RUN2-00'
# begin run IOV
IOVBegin = 0
###################################################################
from RecExConfig.RecFlags import rec
rec.RunNumber.set_Value_and_Lock(int(RunNumber))
from PerfMonComps.PerfMonFlags import jobproperties
jobproperties.PerfMonFlags.doMonitoring = True
from AthenaCommon.DetFlags import DetFlags
DetFlags.all_setOff()
DetFlags.LAr_setOn()
DetFlags.Tile_setOn()
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo.set_Value_and_Lock('atlas')
globalflags.DataSource.set_Value_and_Lock('data')
globalflags.DatabaseInstance="CONDBR2"
# Get a handle to the default top-level algorithm sequence
from AthenaCommon.AppMgr import ToolSvc
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
# Get a handle to the ApplicationManager
from AthenaCommon.AppMgr import theApp
# Setup Db stuff
import AthenaPoolCnvSvc.AthenaPool
from AthenaCommon.GlobalFlags import jobproperties
jobproperties.Global.DetDescrVersion='ATLAS-R2-2015-04-00-00'
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from AtlasGeoModel import SetupRecoGeometry
svcMgr.IOVDbSvc.GlobalTag = GlobalTag
try:
svcMgr.IOVDbSvc.DBInstance=""
except:
pass
include( "AthenaCommon/Atlas_Gen.UnixStandardJob.py" )
include( "CaloDetMgrDetDescrCnv/CaloDetMgrDetDescrCnv_joboptions.py")
#include( "CaloIdCnv/CaloIdCnv_joboptions.py" )
#include( "TileIdCnv/TileIdCnv_jobOptions.py" )
#include( "LArDetDescr/LArDetDescr_joboptions.py" )
#include("TileConditions/TileConditions_jobOptions.py" )
include("LArConditionsCommon/LArConditionsCommon_comm_jobOptions.py")
#include( "LArCondAthenaPool/LArCondAthenaPool_joboptions.py" )
include( "LArConditionsCommon/LArIdMap_comm_jobOptions.py" )
#block to read the existing HVCorr
conddb.blockFolder(LArHVScaleCorrFolder);
from LArConditionsCommon import LArHVDB #Sets HV Calbling and DCS Database folders
#conddb.addOverride("/LAR/IdentifierOfl/HVLineToElectrodeMap","LARIdentifierOflHVLineToElectrodeMap-UPD3-00")
from LArCondUtils.LArCondUtilsConf import LArHVToolDB
theLArHVToolDB = LArHVToolDB("LArHVToolDB")
ToolSvc += theLArHVToolDB
from LArRecUtils.LArRecUtilsConf import LArHVCorrTool
theLArHVCorrTool = LArHVCorrTool("LArHVCorrTool")
theLArHVCorrTool.keyOutput = keyOutput
theLArHVCorrTool.folderName= LArHVScaleCorrFolder
theLArHVCorrTool.HVTool = theLArHVToolDB
ToolSvc += theLArHVCorrTool
from LArCalibUtils.LArCalibUtilsConf import LArHVCorrMaker
theLArHVCorrMaker = LArHVCorrMaker("LArHVCorrMaker")
topSequence += theLArHVCorrMaker
from LArCalibTools.LArCalibToolsConf import LArHVScaleCorr2Ntuple
theLArHVScaleCorr2Ntuple = LArHVScaleCorr2Ntuple("LArHVScaleCorr2Ntuple")
theLArHVScaleCorr2Ntuple.AddFEBTempInfo = False
topSequence += theLArHVScaleCorr2Ntuple
#from LArCalibTools.LArCalibToolsConf import LArWFParams2Ntuple
#LArWFParams2Ntuple = LArWFParams2Ntuple("LArWFParams2Ntuple")
#LArWFParams2Ntuple.DumpTdrift = True
#topSequence += LArWFParams2Ntuple
theApp.HistogramPersistency = "ROOT"
from GaudiSvc.GaudiSvcConf import NTupleSvc
svcMgr += NTupleSvc()
svcMgr.NTupleSvc.Output = [ "FILE1 DATAFILE='hvcorr_ntuple.root' OPT='NEW'" ]
# deal with DB output
OutputObjectSpec = "CondAttrListCollection#"+LArHVScaleCorrFolder
OutputObjectSpecTag = ''
OutputDB = "sqlite://;schema="+OutputSQLiteFile+";dbname=CONDBR2"
from RegistrationServices.OutputConditionsAlg import OutputConditionsAlg
theOutputConditionsAlg=OutputConditionsAlg("OutputConditionsAlg",PoolFileName,
[OutputObjectSpec],[OutputObjectSpecTag],WriteIOV)
theOutputConditionsAlg.Run1 = IOVBegin
svcMgr.IOVDbSvc.dbConnection = OutputDB
from RegistrationServices.RegistrationServicesConf import IOVRegistrationSvc
svcMgr += IOVRegistrationSvc()
svcMgr.IOVRegistrationSvc.OutputLevel = DEBUG
svcMgr.IOVRegistrationSvc.RecreateFolders = True
svcMgr.IOVRegistrationSvc.SVFolder=True
svcMgr.IOVRegistrationSvc.OverrideNames += ["HVScaleCorr",]
svcMgr.IOVRegistrationSvc.OverrideTypes += ["Blob16M",]
#--------------------------------------------------------------
#--- Dummy event loop parameters
#--------------------------------------------------------------
svcMgr.EventSelector.RunNumber = RunNumber
svcMgr.EventSelector.EventsPerRun = 1
svcMgr.EventSelector.FirstEvent = 0
svcMgr.EventSelector.EventsPerLB = 1
svcMgr.EventSelector.FirstLB = LumiBlock
svcMgr.EventSelector.InitialTimeStamp = int(TimeStamp/1e9)
svcMgr.EventSelector.TimeStampInterval = 5
svcMgr.EventSelector.OverrideRunNumber=True
theApp.EvtMax = 1
#--------------------------------------------------------------
# Set output level threshold (1=VERBOSE, 2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
#--------------------------------------------------------------
svcMgr.MessageSvc.OutputLevel = INFO
svcMgr.MessageSvc.debugLimit = 100000
svcMgr.MessageSvc.infoLimit = 100000
svcMgr.MessageSvc.Format = "% F%30W%S%7W%R%T %0W%M"
svcMgr.IOVDbSvc.OutputLevel = INFO
theLArHVCorrMaker.OutputLevel = INFO
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
411ce7dd706c852ab886e83eaf557fd6b63de064 | f22cc86b1454d996d42a400904f5dd11cd21341e | /tensorflow_estimator/python/estimator/run_config_test.py | 85dd81ae9a3e2aacc51f0d07093d7eec5f9eea5f | [
"Apache-2.0"
] | permissive | ziky90/estimator | 0006fb7ef59f89948613e184c3778dc2b03ab78f | 825c02ce244ce21ec4f01360dfdf90cbf92f6bde | refs/heads/master | 2020-04-21T02:39:22.223173 | 2019-02-04T22:07:19 | 2019-02-04T22:07:45 | 169,260,730 | 0 | 0 | Apache-2.0 | 2019-02-05T15:12:27 | 2019-02-05T15:12:26 | null | UTF-8 | Python | false | false | 46,363 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RunConfig tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow_estimator.python.estimator import run_config as run_config_lib
# Fixture values and regex fragments matched (via assertRaisesRegexp)
# against the validation error messages raised by RunConfig.
_TEST_DIR = 'test_dir'
_MASTER = 'master_'
_NOT_SUPPORTED_REPLACE_PROPERTY_MSG = 'Replacing .*is not supported'
_SAVE_CKPT_ERR = (
    '`save_checkpoints_steps` and `save_checkpoints_secs` cannot be both set.'
)
_MODEL_DIR_ERR = 'model_dir should be non-empty'
_MODEL_DIR_TF_CONFIG_ERR = 'model_dir in TF_CONFIG should be non-empty'
_MODEL_DIR_MISMATCH_ERR = (
    '`model_dir` provided in RunConfig construct, if set, '
    'must have the same value as the model_dir in TF_CONFIG. ')
_SAVE_SUMMARY_STEPS_ERR = 'save_summary_steps should be >= 0'
_SAVE_CKPT_STEPS_ERR = 'save_checkpoints_steps should be >= 0'
_SAVE_CKPT_SECS_ERR = 'save_checkpoints_secs should be >= 0'
_SESSION_CONFIG_ERR = 'session_config must be instance of ConfigProto'
_KEEP_CKPT_MAX_ERR = 'keep_checkpoint_max should be >= 0'
_KEEP_CKPT_HOURS_ERR = 'keep_checkpoint_every_n_hours should be > 0'
_TF_RANDOM_SEED_ERR = 'tf_random_seed must be integer'
_DEVICE_FN_ERR = 'device_fn must be callable with exactly one argument "op".'
_ONE_CHIEF_ERR = 'The "cluster" in TF_CONFIG must have only one "chief" node.'
_ONE_MASTER_ERR = 'The "cluster" in TF_CONFIG must have only one "master" node.'
_INVALID_TASK_TYPE_FOR_EVAL_MASTER = (
    'Key.*eval.*master.*should not be set for task type other than')
_MISSING_CHIEF_ERR = 'If "cluster" is set .* it must have one "chief" node'
_MISSING_TASK_TYPE_ERR = 'If "cluster" is set .* task type must be set'
_MISSING_TASK_ID_ERR = 'If "cluster" is set .* task index must be set'
_INVALID_TASK_INDEX_ERR = 'is not a valid task_id'
_NEGATIVE_TASK_INDEX_ERR = 'Task index must be non-negative number.'
_INVALID_TASK_TYPE_ERR = 'is not a valid task_type'
_INVALID_TASK_TYPE_FOR_LOCAL_ERR = (
    'If "cluster" is not set in TF_CONFIG, task type must be WORKER.')
_INVALID_TASK_INDEX_FOR_LOCAL_ERR = (
    'If "cluster" is not set in TF_CONFIG, task index must be 0.')
_INVALID_EVALUATOR_IN_CLUSTER_WITH_MASTER_ERR = (
    'If `master` node exists in `cluster`, task_type `evaluator` is not '
    'supported.')
_INVALID_CHIEF_IN_CLUSTER_WITH_MASTER_ERR = (
    'If `master` node exists in `cluster`, job `chief` is not supported.')
_INVALID_SERVICE_TYPE_ERR = (
    'If "service" is set in TF_CONFIG, it must be a dict. Given')
def _create_run_config_with_cluster_spec(tf_config, **kwargs):
  """Build a RunConfig while the TF_CONFIG env var is patched to `tf_config`."""
  serialized = json.dumps(tf_config)
  with test.mock.patch.dict('os.environ', {'TF_CONFIG': serialized}):
    return run_config_lib.RunConfig(**kwargs)
class RunConfigTest(test.TestCase):
  """Tests for RunConfig defaults, `replace()`, and constructor validation."""

  def test_default_property_values(self):
    # A bare RunConfig must expose the documented defaults.
    config = run_config_lib.RunConfig()
    self.assertIsNone(config.model_dir)
    self.assertIsNone(config.session_config)
    self.assertIsNone(config.tf_random_seed)
    self.assertEqual(100, config.save_summary_steps)
    self.assertEqual(600, config.save_checkpoints_secs)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertEqual(5, config.keep_checkpoint_max)
    self.assertEqual(10000, config.keep_checkpoint_every_n_hours)
    self.assertIsNone(config.service)
    self.assertIsNone(config.device_fn)

  def test_model_dir(self):
    # model_dir is settable via replace() only.
    empty_config = run_config_lib.RunConfig()
    self.assertIsNone(empty_config.model_dir)
    new_config = empty_config.replace(model_dir=_TEST_DIR)
    self.assertEqual(_TEST_DIR, new_config.model_dir)

  def test_replace_with_allowed_properties(self):
    session_config = config_pb2.ConfigProto(allow_soft_placement=True)
    device_fn = lambda op: '/cpu:0'

    config = run_config_lib.RunConfig().replace(
        tf_random_seed=11,
        save_summary_steps=12,
        save_checkpoints_secs=14,
        session_config=session_config,
        keep_checkpoint_max=16,
        keep_checkpoint_every_n_hours=17,
        device_fn=device_fn)
    self.assertEqual(11, config.tf_random_seed)
    self.assertEqual(12, config.save_summary_steps)
    self.assertEqual(14, config.save_checkpoints_secs)
    self.assertEqual(session_config, config.session_config)
    self.assertEqual(16, config.keep_checkpoint_max)
    self.assertEqual(17, config.keep_checkpoint_every_n_hours)
    self.assertEqual(device_fn, config.device_fn)

  def test_replace_none_value(self):
    # Every replaceable property accepts an explicit None.
    config = run_config_lib.RunConfig().replace(
        tf_random_seed=None,
        model_dir=None,
        save_summary_steps=None,
        save_checkpoints_secs=None,
        save_checkpoints_steps=None,
        session_config=None,
        keep_checkpoint_max=None,
        keep_checkpoint_every_n_hours=None,
        device_fn=None)
    self.assertIsNone(config.tf_random_seed)
    self.assertIsNone(config.model_dir)
    self.assertIsNone(config.save_summary_steps)
    self.assertIsNone(config.save_checkpoints_secs)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertIsNone(config.session_config)
    self.assertIsNone(config.keep_checkpoint_max)
    self.assertIsNone(config.keep_checkpoint_every_n_hours)
    self.assertIsNone(config.device_fn)

  def test_replace_with_disallowallowed_properties(self):
    config = run_config_lib.RunConfig()

    with self.assertRaises(ValueError):
      # `master` is immutable and may not be replaced.
      config.replace(master='_master')
    with self.assertRaises(ValueError):
      config.replace(some_undefined_property=123)

  def test_replace(self):
    # Same as above, but also checks the error message text.
    config = run_config_lib.RunConfig()

    with self.assertRaisesRegexp(
        ValueError, _NOT_SUPPORTED_REPLACE_PROPERTY_MSG):
      # master is not allowed to be replaced.
      config.replace(master=_MASTER)

    with self.assertRaisesRegexp(
        ValueError, _NOT_SUPPORTED_REPLACE_PROPERTY_MSG):
      config.replace(some_undefined_property=_MASTER)

  def test_replace_invalid_values(self):
    # replace() performs the same validation as the constructor.
    config = run_config_lib.RunConfig()

    with self.assertRaisesRegexp(ValueError, _MODEL_DIR_ERR):
      config.replace(model_dir='')
    with self.assertRaisesRegexp(ValueError, _SAVE_SUMMARY_STEPS_ERR):
      config.replace(save_summary_steps=-1)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_STEPS_ERR):
      config.replace(save_checkpoints_steps=-1)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_SECS_ERR):
      config.replace(save_checkpoints_secs=-1)
    with self.assertRaisesRegexp(ValueError, _SESSION_CONFIG_ERR):
      config.replace(session_config={})
    with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_MAX_ERR):
      config.replace(keep_checkpoint_max=-1)
    with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_HOURS_ERR):
      config.replace(keep_checkpoint_every_n_hours=0)
    with self.assertRaisesRegexp(ValueError, _TF_RANDOM_SEED_ERR):
      config.replace(tf_random_seed=1.0)
    with self.assertRaisesRegexp(ValueError, _DEVICE_FN_ERR):
      config.replace(device_fn=lambda x, y: 0)

  def test_init_with_allowed_properties(self):
    session_config = config_pb2.ConfigProto(allow_soft_placement=True)
    device_fn = lambda op: "/cpu:0"

    config = run_config_lib.RunConfig(
        tf_random_seed=11,
        save_summary_steps=12,
        save_checkpoints_secs=14,
        session_config=session_config,
        keep_checkpoint_max=16,
        keep_checkpoint_every_n_hours=17,
        device_fn=device_fn)
    self.assertEqual(11, config.tf_random_seed)
    self.assertEqual(12, config.save_summary_steps)
    self.assertEqual(14, config.save_checkpoints_secs)
    self.assertEqual(session_config, config.session_config)
    self.assertEqual(16, config.keep_checkpoint_max)
    self.assertEqual(17, config.keep_checkpoint_every_n_hours)
    self.assertEqual(device_fn, config.device_fn)

  def test_init_none_value(self):
    config = run_config_lib.RunConfig(
        tf_random_seed=None,
        model_dir=None,
        save_summary_steps=None,
        save_checkpoints_secs=None,
        save_checkpoints_steps=None,
        session_config=None,
        keep_checkpoint_max=None,
        keep_checkpoint_every_n_hours=None,
        device_fn=None)
    self.assertIsNone(config.tf_random_seed)
    self.assertIsNone(config.model_dir)
    self.assertIsNone(config.save_summary_steps)
    self.assertIsNone(config.save_checkpoints_secs)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertIsNone(config.session_config)
    self.assertIsNone(config.keep_checkpoint_max)
    self.assertIsNone(config.keep_checkpoint_every_n_hours)
    self.assertIsNone(config.device_fn)

  def test_init_invalid_values(self):
    with self.assertRaisesRegexp(ValueError, _MODEL_DIR_ERR):
      run_config_lib.RunConfig(model_dir='')
    with self.assertRaisesRegexp(ValueError, _SAVE_SUMMARY_STEPS_ERR):
      run_config_lib.RunConfig(save_summary_steps=-1)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_STEPS_ERR):
      run_config_lib.RunConfig(save_checkpoints_steps=-1)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_SECS_ERR):
      run_config_lib.RunConfig(save_checkpoints_secs=-1)
    with self.assertRaisesRegexp(ValueError, _SESSION_CONFIG_ERR):
      run_config_lib.RunConfig(session_config={})
    with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_MAX_ERR):
      run_config_lib.RunConfig(keep_checkpoint_max=-1)
    with self.assertRaisesRegexp(ValueError, _KEEP_CKPT_HOURS_ERR):
      run_config_lib.RunConfig(keep_checkpoint_every_n_hours=0)
    with self.assertRaisesRegexp(ValueError, _TF_RANDOM_SEED_ERR):
      run_config_lib.RunConfig(tf_random_seed=1.0)
    with self.assertRaisesRegexp(ValueError, _DEVICE_FN_ERR):
      run_config_lib.RunConfig(device_fn=lambda x: '/cpu:0')
class RunConfigDistributedSettingTest(test.TestCase):
  """Tests TF_CONFIG parsing for clusters using the `chief` task type.

  Covers local mode, distributed mode, session/eval masters, error cases
  for malformed task specs, and `global_id_in_cluster` per task type.
  """
  def _assert_distributed_properties(self, run_config,
                                     expected_cluster_spec,
                                     expected_task_type,
                                     expected_task_id,
                                     expected_master,
                                     expected_evaluation_master,
                                     expected_is_chief,
                                     expected_num_worker_replicas,
                                     expected_num_ps_replicas):
    """Asserts all distribution-related properties of `run_config`."""
    self.assertEqual(expected_cluster_spec, run_config.cluster_spec.as_dict())
    self.assertEqual(expected_task_type, run_config.task_type)
    self.assertEqual(expected_task_id, run_config.task_id)
    self.assertEqual(expected_master, run_config.master)
    self.assertEqual(expected_evaluation_master, run_config.evaluation_master)
    self.assertEqual(expected_is_chief, run_config.is_chief)
    self.assertEqual(expected_num_worker_replicas,
                     run_config.num_worker_replicas)
    self.assertEqual(expected_num_ps_replicas, run_config.num_ps_replicas)
  def test_default_values(self):
    """Without TF_CONFIG the config behaves as a single local worker."""
    self._assert_distributed_properties(
        run_config=run_config_lib.RunConfig(),
        expected_cluster_spec={},
        expected_task_type=run_config_lib.TaskType.WORKER,
        expected_task_id=0,
        expected_master='',
        expected_evaluation_master='',
        expected_is_chief=True,
        expected_num_worker_replicas=1,
        expected_num_ps_replicas=0)
  def test_tf_config_for_local(self):
    """A worker task with no cluster section means local training."""
    tf_config = {
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 0
        }
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self._assert_distributed_properties(
        run_config=run_config,
        expected_cluster_spec={},
        expected_task_type=run_config_lib.TaskType.WORKER,
        expected_task_id=0,
        expected_master='',
        expected_evaluation_master='',
        expected_is_chief=True,
        expected_num_worker_replicas=1,
        expected_num_ps_replicas=0)
    self.assertEqual(0, run_config.global_id_in_cluster)
    # NOTE(review): the second argument here is treated as the assertion
    # `msg`, not a comparison value; the trailing `None` is likely
    # unintended.
    self.assertIsNone(run_config.session_config, None)
  def test_session_master_for_local(self):
    """`session_master` in TF_CONFIG overrides master in local mode."""
    tf_config = {'session_master': '_my_master'}
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec={},
        expected_task_type=run_config_lib.TaskType.WORKER,
        expected_task_id=0,
        expected_master='_my_master',
        expected_evaluation_master='',
        expected_is_chief=True,
        expected_num_worker_replicas=1,
        expected_num_ps_replicas=0)
  def test_eval_session_master_for_local(self):
    """`eval_session_master` sets the evaluation master in local mode."""
    tf_config = {'eval_session_master': '_my_eval_master'}
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec={},
        expected_task_type=run_config_lib.TaskType.WORKER,
        expected_task_id=0,
        expected_master='',
        expected_evaluation_master='_my_eval_master',
        expected_is_chief=True,
        expected_num_worker_replicas=1,
        expected_num_ps_replicas=0)
  def test_invalid_task_type_for_local(self):
    """A chief task type is invalid without a cluster section."""
    tf_config = {
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 0
        }
    }
    with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE_FOR_LOCAL_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_invalid_task_index_for_local(self):
    """Only task index 0 is valid in local mode."""
    tf_config = {
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 1
        }
    }
    with self.assertRaisesRegexp(ValueError, _INVALID_TASK_INDEX_FOR_LOCAL_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_chief_tf_config(self):
    """The chief derives its master address from its own cluster entry."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 0
        }
    }
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec=tf_config['cluster'],
        expected_task_type=run_config_lib.TaskType.CHIEF,
        expected_task_id=0,
        expected_master='grpc://host0:0',
        expected_evaluation_master='',
        expected_is_chief=True,
        expected_num_worker_replicas=4,
        expected_num_ps_replicas=2)
  def test_session_master_from_single_node_tf_config(self):
    """`session_master` overrides the derived master (single node)."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 0
        },
        'session_master': '_my_master'
    }
    self.assertEqual('_my_master',
                     _create_run_config_with_cluster_spec(tf_config).master)
  def test_session_master_from_multiple_nodes_tf_config(self):
    """`session_master` overrides the derived master (multi node)."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 0
        },
        'session_master': '_my_master'
    }
    self.assertEqual('_my_master',
                     _create_run_config_with_cluster_spec(tf_config).master)
  def test_fail_with_eval_session_master_for_non_evaluator(self):
    """`eval_session_master` is only allowed for the evaluator task."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 0
        },
        'eval_session_master': 'grpc://123',
    }
    with self.assertRaisesRegexp(
        ValueError, _INVALID_TASK_TYPE_FOR_EVAL_MASTER):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_multiple_chief_nodes(self):
    """A cluster must contain at most one chief node."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0', 'host:6:6'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
    }
    with self.assertRaisesRegexp(ValueError, _ONE_CHIEF_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_missing_chief_node(self):
    """A cluster without a chief node is rejected."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
    }
    with self.assertRaisesRegexp(ValueError, _MISSING_CHIEF_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_single_chief_node(self):
    """A single-node cluster (chief only) has an empty master address."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 0
        }
    }
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec=tf_config['cluster'],
        expected_task_type=run_config_lib.TaskType.CHIEF,
        expected_task_id=0,
        expected_master='',
        expected_evaluation_master='',
        expected_is_chief=True,
        expected_num_worker_replicas=1,
        expected_num_ps_replicas=0)
  def test_fail_with_missing_task_type_for_distributed(self):
    """A distributed TF_CONFIG must include a task section."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host3:3']
        },
    }
    with self.assertRaisesRegexp(ValueError, _MISSING_TASK_TYPE_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_missing_task_index_for_distributed(self):
    """A distributed task section must include an index."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
        }
    }
    with self.assertRaisesRegexp(ValueError, _MISSING_TASK_ID_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_index_is_too_large(self):
    """The task index must address an existing cluster entry."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 1
        }
    }
    with self.assertRaisesRegexp(ValueError, _INVALID_TASK_INDEX_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_invalid_task_index(self):
    """Negative task indices are rejected."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': -1
        }
    }
    with self.assertRaisesRegexp(ValueError, _NEGATIVE_TASK_INDEX_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_invalid_task_type(self):
    """The task type must appear in the cluster spec."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 0
        }
    }
    with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_worker_tf_config(self):
    """A worker derives its master from its own cluster entry by index."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 1
        }
    }
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec=tf_config['cluster'],
        expected_task_type=run_config_lib.TaskType.WORKER,
        expected_task_id=1,
        expected_master='grpc://host4:4',
        expected_evaluation_master='',
        expected_is_chief=False,
        expected_num_worker_replicas=4,
        expected_num_ps_replicas=2)
  def test_ps_tf_config(self):
    """A ps task derives its master from its own cluster entry."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.PS,
            'index': 0
        }
    }
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec=tf_config['cluster'],
        expected_task_type=run_config_lib.TaskType.PS,
        expected_task_id=0,
        expected_master='grpc://host1:1',
        expected_evaluation_master='',
        expected_is_chief=False,
        expected_num_worker_replicas=4,
        expected_num_ps_replicas=2)
  def test_evaluator_tf_config(self):
    """The evaluator is outside the training cluster."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.EVALUATOR,
            'index': 12
        }
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self._assert_distributed_properties(
        run_config=run_config,
        expected_cluster_spec={},
        expected_task_type=run_config_lib.TaskType.EVALUATOR,
        expected_task_id=12,
        expected_master='',
        expected_evaluation_master='',
        expected_is_chief=False,  # evaluator is never chief.
        expected_num_worker_replicas=0,  # evaluator is not in training cluster.
        expected_num_ps_replicas=0)
    self.assertIsNone(run_config.global_id_in_cluster)
  def test_eval_master_for_evaluator(self):
    """`eval_session_master` is honored for the evaluator task."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.EVALUATOR,
            'index': 12
        },
        'eval_session_master': 'grpc://123',
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual('grpc://123', run_config.evaluation_master)
  def test_fail_with_invalid_task_index_for_evaluator(self):
    """Negative evaluator indices are rejected."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.EVALUATOR,
            'index': -1
        }
    }
    with self.assertRaisesRegexp(ValueError, _NEGATIVE_TASK_INDEX_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_global_id_in_cluster_for_chief(self):
    """The chief always has global id 0."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5'],
            run_config_lib.TaskType.PS: ['host6:3', 'host7:4', 'host8:5']
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 0,
        },
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual(0, run_config.global_id_in_cluster)
  def test_global_id_in_cluster_for_worker(self):
    """Workers are numbered after the chief."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5'],
            run_config_lib.TaskType.PS: ['host6:3', 'host7:4', 'host8:5']
        },
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 2,
        },
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual(3, run_config.global_id_in_cluster)
  def test_global_id_in_cluster_for_ps(self):
    """PS tasks are numbered last."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5'],
            run_config_lib.TaskType.PS: ['host6:3', 'host7:4', 'host8:5']
        },
        'task': {
            'type': run_config_lib.TaskType.PS,
            'index': 1,
        },
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual(5, run_config.global_id_in_cluster)
  def test_global_id_in_cluster_for_multipe_worker_types(self):
    """Non-standard task types are ordered alphabetically between chief/ps."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            'worker': ['host3:3', 'host4:4', 'host5:5'],
            'other_type': ['host3:1', 'host4:2'],
            run_config_lib.TaskType.PS: ['host6:3', 'host7:4', 'host8:5']
        },
        'task': {
            'type': 'other_type',
            'index': 1,
        },
    }
    # Though 'other_type' is defined after 'worker', based on alphabetical
    # order, the task type order should be 'chief', 'other_type', 'worker',
    # 'ps', where 'chief' and 'ps' are predefined to be the top and last in the
    # order list.
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual(2, run_config.global_id_in_cluster)
class RunConfigDistributedSettingWithMasterTest(test.TestCase):
  """Tests TF_CONFIG parsing for clusters using the legacy `master` task.

  Mirrors the `chief`-based coverage, and additionally checks that
  `master` clusters reject `chief` and `evaluator` tasks.
  """
  def _assert_distributed_properties(self, run_config,
                                     expected_cluster_spec,
                                     expected_task_type,
                                     expected_task_id,
                                     expected_master,
                                     expected_evaluation_master,
                                     expected_is_chief,
                                     expected_num_worker_replicas,
                                     expected_num_ps_replicas):
    """Asserts all distribution-related properties of `run_config`."""
    self.assertEqual(expected_cluster_spec, run_config.cluster_spec.as_dict())
    self.assertEqual(expected_task_type, run_config.task_type)
    self.assertEqual(expected_task_id, run_config.task_id)
    self.assertEqual(expected_master, run_config.master)
    self.assertEqual(expected_evaluation_master, run_config.evaluation_master)
    self.assertEqual(expected_is_chief, run_config.is_chief)
    self.assertEqual(expected_num_worker_replicas,
                     run_config.num_worker_replicas)
    self.assertEqual(expected_num_ps_replicas, run_config.num_ps_replicas)
  def test_invalid_task_type_for_local(self):
    """A master task type is invalid without a cluster section."""
    tf_config = {
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 0
        }
    }
    with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE_FOR_LOCAL_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_master_node(self):
    """The master node acts as chief and serves the session master."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 0
        }
    }
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec=tf_config['cluster'],
        expected_task_type=run_config_lib.TaskType.MASTER,
        expected_task_id=0,
        expected_master='grpc://host0:0',
        expected_evaluation_master='',
        expected_is_chief=True,
        expected_num_worker_replicas=4,
        expected_num_ps_replicas=2)
  def test_session_master_in_single_node_tf_config(self):
    """`session_master` overrides the derived master (single node)."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 0
        },
        'session_master': '_my_master'
    }
    self.assertEqual('_my_master',
                     _create_run_config_with_cluster_spec(tf_config).master)
  def test_session_master_in_multiple_nodes_tf_config(self):
    """`session_master` overrides the derived master (multi node)."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 0
        },
        'session_master': '_my_master'
    }
    self.assertEqual('_my_master',
                     _create_run_config_with_cluster_spec(tf_config).master)
  def test_fail_with_eval_session_master(self):
    """`eval_session_master` is not allowed for the master task."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 0
        },
        'eval_session_master': 'grpc://123',
    }
    with self.assertRaisesRegexp(
        ValueError, _INVALID_TASK_TYPE_FOR_EVAL_MASTER):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_multiple_master_nodes(self):
    """A cluster must contain at most one master node."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0', 'host:6:6'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
    }
    with self.assertRaisesRegexp(ValueError, _ONE_MASTER_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_single_master_node(self):
    """A single-node cluster (master only) has an empty master address."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 0
        }
    }
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec=tf_config['cluster'],
        expected_task_type=run_config_lib.TaskType.MASTER,
        expected_task_id=0,
        expected_master='',
        expected_evaluation_master='',
        expected_is_chief=True,
        expected_num_worker_replicas=1,
        expected_num_ps_replicas=0)
  def test_fail_with_missing_task_type_for_distributed(self):
    """A distributed TF_CONFIG must include a task section."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host3:3']
        },
    }
    with self.assertRaisesRegexp(ValueError, _MISSING_TASK_TYPE_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_missing_task_index_for_distributed(self):
    """A distributed task section must include an index."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
        }
    }
    with self.assertRaisesRegexp(ValueError, _MISSING_TASK_ID_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_index_is_too_large(self):
    """The task index must address an existing cluster entry."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 1
        }
    }
    with self.assertRaisesRegexp(ValueError, _INVALID_TASK_INDEX_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_invalid_task_index(self):
    """Negative task indices are rejected."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': -1
        }
    }
    with self.assertRaisesRegexp(ValueError, _NEGATIVE_TASK_INDEX_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_invalid_task_type(self):
    """The task type must appear in the cluster spec."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host3:3']
        },
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 0
        }
    }
    with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_worker_tf_config(self):
    """A worker derives its master from its own cluster entry by index."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 1
        }
    }
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec=tf_config['cluster'],
        expected_task_type=run_config_lib.TaskType.WORKER,
        expected_task_id=1,
        expected_master='grpc://host4:4',
        expected_evaluation_master='',
        expected_is_chief=False,
        expected_num_worker_replicas=4,
        expected_num_ps_replicas=2)
  def test_ps_tf_config(self):
    """A ps task derives its master from its own cluster entry."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.PS,
            'index': 0
        }
    }
    self._assert_distributed_properties(
        run_config=_create_run_config_with_cluster_spec(tf_config),
        expected_cluster_spec=tf_config['cluster'],
        expected_task_type=run_config_lib.TaskType.PS,
        expected_task_id=0,
        expected_master='grpc://host1:1',
        expected_evaluation_master='',
        expected_is_chief=False,
        expected_num_worker_replicas=4,
        expected_num_ps_replicas=2)
  def test_fail_with_evaluator(self):
    """`evaluator` tasks are not allowed in a master-based cluster."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.EVALUATOR,
            'index': 1
        }
    }
    with self.assertRaisesRegexp(ValueError,
                                 _INVALID_EVALUATOR_IN_CLUSTER_WITH_MASTER_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_fail_with_chief(self):
    """`chief` nodes are not allowed alongside a master node."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.CHIEF: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.PS,
            'index': 1
        }
    }
    with self.assertRaisesRegexp(ValueError,
                                 _INVALID_CHIEF_IN_CLUSTER_WITH_MASTER_ERR):
      _create_run_config_with_cluster_spec(tf_config)
  def test_global_id_in_cluster_for_master(self):
    """The master always has global id 0."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5'],
            run_config_lib.TaskType.PS: ['host6:3', 'host7:4', 'host8:5']
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 0,
        },
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual(0, run_config.global_id_in_cluster)
  def test_global_id_in_cluster_for_worker(self):
    """Workers are numbered after the master."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5'],
            run_config_lib.TaskType.PS: ['host6:3', 'host7:4', 'host8:5']
        },
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 2,
        },
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual(3, run_config.global_id_in_cluster)
  def test_global_id_in_cluster_for_ps(self):
    """PS tasks are numbered last."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5'],
            run_config_lib.TaskType.PS: ['host6:3', 'host7:4', 'host8:5']
        },
        'task': {
            'type': run_config_lib.TaskType.PS,
            'index': 1,
        },
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual(5, run_config.global_id_in_cluster)
  def test_global_id_in_cluster_for_multipe_worker_types(self):
    """Non-standard task types are ordered alphabetically between master/ps."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            'worker': ['host3:3', 'host4:4', 'host5:5'],
            'other_type': ['host3:1', 'host4:2'],
            run_config_lib.TaskType.PS: ['host6:3', 'host7:4', 'host8:5']
        },
        'task': {
            'type': 'other_type',
            'index': 1,
        },
    }
    # Though 'other_type' is defined after 'worker', based on alphabetical
    # order, the task type order should be 'chief', 'other_type', 'worker',
    # 'ps', where 'chief' and 'ps' are predefined to be the top and last in the
    # order list.
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertEqual(2, run_config.global_id_in_cluster)
class RunConfigSaveCheckpointsTest(test.TestCase):
  """Tests the interplay of save_checkpoints_steps and save_checkpoints_secs."""
  def test_save_checkpoint(self):
    """Setting one of steps/secs via `replace` clears the other."""
    default_config = run_config_lib.RunConfig()
    self.assertEqual(600, default_config.save_checkpoints_secs)
    self.assertIsNone(default_config.save_checkpoints_steps)
    steps_config = default_config.replace(save_checkpoints_steps=100)
    del default_config
    self.assertEqual(100, steps_config.save_checkpoints_steps)
    self.assertIsNone(steps_config.save_checkpoints_secs)
    secs_config = steps_config.replace(save_checkpoints_secs=200)
    del steps_config
    self.assertEqual(200, secs_config.save_checkpoints_secs)
    self.assertIsNone(secs_config.save_checkpoints_steps)
  def test_save_checkpoint_both_steps_and_secs_are_not_none(self):
    """Specifying both steps and secs simultaneously is rejected."""
    base_config = run_config_lib.RunConfig()
    both_set = dict(save_checkpoints_steps=100, save_checkpoints_secs=200)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_ERR):
      base_config.replace(**both_set)
    with self.assertRaisesRegexp(ValueError, _SAVE_CKPT_ERR):
      run_config_lib.RunConfig(**both_set)
  def test_save_checkpoint_both_steps_and_secs_are_none(self):
    """Both options may be cleared at once to disable checkpointing."""
    config = run_config_lib.RunConfig().replace(
        save_checkpoints_steps=None, save_checkpoints_secs=None)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertIsNone(config.save_checkpoints_secs)
  def test_save_checkpoint_flip_secs_to_none(self):
    """Clearing secs alone disables checkpointing entirely."""
    config = run_config_lib.RunConfig().replace(save_checkpoints_secs=None)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertIsNone(config.save_checkpoints_secs)
  def test_save_checkpoint_flip_steps_to_none(self):
    """Clearing steps alone disables checkpointing entirely."""
    config = run_config_lib.RunConfig().replace(
        save_checkpoints_steps=100).replace(save_checkpoints_steps=None)
    self.assertIsNone(config.save_checkpoints_steps)
    self.assertIsNone(config.save_checkpoints_secs)
class RunConfigServiceKeyTest(test.TestCase):
  """Tests the handling of the 'service' key in TF_CONFIG."""
  def test_arbitrary_key_value_pairs(self):
    """The 'service' dict is surfaced verbatim via the property."""
    service_dict = {
        'key1': [1, 2],
        'key2': {'a': 3, 'b': 4},
        'key3': 789,
    }
    run_config = _create_run_config_with_cluster_spec({'service': service_dict})
    self.assertEqual(service_dict, run_config.service)
  def test_missing_service_key(self):
    """Without a 'service' entry the property is None."""
    run_config = _create_run_config_with_cluster_spec(
        {'model_dir': '/tmp/123'})
    self.assertIsNone(run_config.service)
  def test_fail_with_non_dict(self):
    """A non-dict 'service' value raises a TypeError."""
    with self.assertRaisesRegexp(TypeError, _INVALID_SERVICE_TYPE_ERR):
      _create_run_config_with_cluster_spec({'service': 789})
class RunConfigModelDirTest(test.TestCase):
  """Tests model_dir resolution from the constructor and TF_CONFIG."""
  def test_default(self):
    """model_dir defaults to None."""
    self.assertIsNone(run_config_lib.RunConfig().model_dir)
  def test_model_dir_in_constructor(self):
    """The constructor argument is surfaced by the property."""
    config = run_config_lib.RunConfig(model_dir='/tmp/123')
    self.assertEqual('/tmp/123', config.model_dir)
  def test_model_dir_in_tf_config(self):
    """The TF_CONFIG value is surfaced by the property."""
    config = _create_run_config_with_cluster_spec({'model_dir': '/tmp/123'})
    self.assertEqual('/tmp/123', config.model_dir)
  def test_model_dir_both_set_in_both_constructor_and_tf_config(self):
    """Identical values from both sources are accepted."""
    shared_dir = '/tmp/123'
    config = _create_run_config_with_cluster_spec(
        {'model_dir': shared_dir}, model_dir=shared_dir)
    self.assertEqual('/tmp/123', config.model_dir)
  def test_model_dir_different_in_both_constructor_and_tf_config(self):
    """Conflicting values from the two sources are rejected."""
    with self.assertRaisesRegexp(ValueError, _MODEL_DIR_MISMATCH_ERR):
      _create_run_config_with_cluster_spec(
          {'model_dir': '/tmp/123'}, model_dir='/tmp/456')
  def test_fail_with_empty_string_in_constructor(self):
    """An empty model_dir in the constructor is rejected."""
    with self.assertRaisesRegexp(ValueError, _MODEL_DIR_ERR):
      run_config_lib.RunConfig(model_dir='')
  def test_fail_with_empty_string_in_tf_config(self):
    """An empty model_dir in TF_CONFIG is rejected."""
    with self.assertRaisesRegexp(ValueError, _MODEL_DIR_TF_CONFIG_ERR):
      _create_run_config_with_cluster_spec({'model_dir': ''})
class RunConfigSessionConfigTest(test.TestCase):
  """Tests the default session_config (device filters) per task type.

  Known cluster roles get a ConfigProto with role-specific device
  filters; evaluator and unknown task types get no session_config.
  """
  def _assert_equal_session_config(self, session_config,
                                   expected_device_filters):
    """Asserts `session_config` matches the expected default proto."""
    rewrite_opts = rewriter_config_pb2.RewriterConfig(
        meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE)
    graph_opts = config_pb2.GraphOptions(rewrite_options=rewrite_opts)
    expected_session_config = config_pb2.ConfigProto(
        allow_soft_placement=True,
        graph_options=graph_opts,
        device_filters=expected_device_filters)
    self.assertEqual(session_config, expected_session_config)
  def test_master_session_config(self):
    """The master only sees ps and master devices."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.MASTER,
            'index': 0
        }
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self._assert_equal_session_config(run_config.session_config,
                                      ['/job:ps', '/job:master'])
  def test_chief_session_config(self):
    """The chief only sees ps and chief devices."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.CHIEF,
            'index': 0
        }
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self._assert_equal_session_config(run_config.session_config,
                                      ['/job:ps', '/job:chief'])
  def test_worker_session_config(self):
    """A worker only sees ps devices and its own task."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.WORKER,
            'index': 1
        }
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self._assert_equal_session_config(run_config.session_config,
                                      ['/job:ps', '/job:worker/task:1'])
  def test_ps_session_config(self):
    """A ps task sees ps, worker, chief and master devices."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.PS,
            'index': 1
        }
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self._assert_equal_session_config(
        run_config.session_config,
        ['/job:ps', '/job:worker', '/job:chief', '/job:master'])
  def test_evaluator_session_config(self):
    """The evaluator gets no default session_config."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.CHIEF: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': run_config_lib.TaskType.EVALUATOR,
            'index': 0
        }
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertIsNone(run_config.session_config)
  def test_other_type_session_config(self):
    """Unknown task types get no default session_config."""
    tf_config = {
        'cluster': {
            run_config_lib.TaskType.MASTER: ['host0:0'],
            run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
            'other_type': ['host3:1', 'host4:2'],
            run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
        },
        'task': {
            'type': 'other_type',
            'index': 0
        }
    }
    run_config = _create_run_config_with_cluster_spec(tf_config)
    self.assertIsNone(run_config.session_config)
if __name__ == '__main__':
  # Run all test cases defined in this module.
  test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
58310eb70b1d56186ab82bb5923708c9a6cfd67d | b3f22f69401f57e29ea7b3fe7fd921d328abfdb2 | /autohome/items.py | 98d04c298d38af7dedc356a9e5352544c08f6325 | [] | no_license | ivancoacher/autohome | 1f148b4ab6738f226bb2c6683aeafd4e37cde358 | ebf0d213e08a8386b436e03b65a6747aa0de4d43 | refs/heads/master | 2021-05-31T02:27:54.979715 | 2015-11-24T05:18:13 | 2015-11-24T05:18:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class AutohomeItem(Item):
    """Scrapy item holding the data scraped from autohome pages."""
    # Fields are declared as scrapy Field objects; only the raw page
    # content is collected for now.
    content = Field()
| [
"heshang1203@sina.com"
] | heshang1203@sina.com |
5cc795f76bf1ca525800c7786bef520c5fc96657 | 32e716570ab33a201e7581bfa497bfa820729cd8 | /web_3/class_write/server.py | cbfbef35c11da618ca6b434626347f6b6b01a4c6 | [] | no_license | Coder-Chandler/Web | b21bf9213432d1bfe949c00e9c0e507883574d1e | bb1e403ae194aec01896f374607135d24d2cb16f | refs/heads/master | 2021-09-07T11:16:49.516177 | 2018-02-22T05:33:31 | 2018-02-22T05:33:31 | 112,816,855 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,973 | py | # coding: utf-8
"""
url 的规范
第一个 ? 之前的是 path
? 之后的是 query
http://c.cc/search?a=b&c=d&e=1
PATH /search
QUERY a=b&c=d&e=1
"""
import socket
import urllib.parse
from utils import log
from routes import route_static
from routes import route_dict
# Container class that holds the data parsed from one HTTP request.
class Request(object):
    """Holds the method, path, query and body of a single HTTP request."""

    def __init__(self):
        self.method = 'GET'   # HTTP method, defaults to GET
        self.path = ''        # request path, e.g. '/search'
        self.query = {}       # parsed query-string parameters
        self.body = ''        # raw request body (form-encoded text)

    def form(self):
        """Parse the request body as form data and return it as a dict.

        The body has the format ``a=b&c=d&e=1``.  Each key and value is
        percent-decoded *after* splitting, so encoded separators inside a
        value (``%26`` for '&', ``%3D`` for '=') survive, and ``+`` decodes
        to a space as required for form data.

        Example:
            body 'username=g+u%26a%3F&password='
            returns {'username': 'g u&a?', 'password': ''}
        """
        f = {}
        for pair in self.body.split('&'):
            # Skip empty fragments (e.g. an empty body or a trailing '&').
            if not pair:
                continue
            # Split on the first '=' only, then decode each side, so the
            # raw (still-encoded) separators stay unambiguous.
            k, _, v = pair.partition('=')
            f[urllib.parse.unquote_plus(k)] = urllib.parse.unquote_plus(v)
        return f
#
request = Request()
def error(request, code=404):
"""
根据 code 返回不同的错误响应
目前只有 404
"""
# 之前上课我说过不要用数字来作为字典的 key
# 但是在 HTTP 协议中 code 都是数字似乎更方便所以打破了这个原则
e = {
404: b'HTTP/1.1 404 NOT FOUND\r\n\r\n<h1>NOT FOUND</h1>',
}
return e.get(code, b'')
def parsed_path(path):
"""
input: message=hello&author=gua
return: {
'message': 'hello',
'author': 'gua',
}
"""
# find函数用于在str中找某一个字符,如果找得到,那么返回0,找不到返回-1
index = path.find('?')
if index == -1:
return path, {}
else:
path, query_string = path.split('?', 1)
args = query_string.split('&')
query = {}
for arg in args:
k, v = arg.split('=')
query[k] = v
return path, query
def response_for_path(path):
# parsed_path 用于把 path 和 query 分离
path, query = parsed_path(path)
request.path = path
request.query = query
log('path and query ->', (path, query))
"""
根据 path 调用相应的处理函数
没有处理的 path 会返回 404
"""
r = {
'/static': route_static,
# '/': route_index,
# '/login': route_login,
# '/messages': route_message,
}
r.update(route_dict)
response = r.get(path, error)
return response(request)
def run(host='', port=3000):
"""
启动服务器
"""
# 初始化 socket 套路
# 使用 with 可以保证程序中断的时候正确关闭 socket 释放占用的端口
log('start at', '{}:{}'.format(host, port))
with socket.socket() as s:
s.bind((host, port))
# 无限循环来处理请求
while True:
# 监听 接受 读取请求数据 解码成字符串
s.listen(5)
connection, address = s.accept()
r = connection.recv(1000)
r = r.decode('utf-8')
log('原始请求', r)
# log('ip and request, {}\n{}'.format(address, request))
# 因为 chrome 会发送空请求导致 split 得到空 list
# 所以这里判断一下防止程序崩溃
if len(r.split()) < 2:
continue
path = r.split()[1]
# 设置 request 的 method
request.method = r.split()[0]
# 把 body 放入 request 中
request.body = r.split('\r\n\r\n', 1)[1]
# 用 response_for_path 函数来得到 path 对应的响应内容
response = response_for_path(path)
# 把响应发送给客户端
connection.sendall(response)
# 处理完请求, 关闭连接
connection.close()
if __name__ == '__main__':
# 生成配置并且运行程序
config = dict(
host='',
port=3000,
)
run(**config)
| [
"ysiwgtus@gmail.com"
] | ysiwgtus@gmail.com |
b39454d27ebf63469af6273933ebd071722291de | abafb3ca13f96972aa1e8fb5bcad61d1c17a02bd | /src/__init__.py | 17dc9efa59dddf02d8ebe478e23ce97a31cb2acb | [] | no_license | rfaulkner/chip_8_emulator | 3bddef0eb483becdfe4d72ed4398166fe73a3cb7 | bf3f9e61e52f50ff559a3e0cf6d7ae3dab703004 | refs/heads/master | 2020-04-06T16:35:22.930728 | 2012-11-25T23:36:41 | 2012-11-25T23:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | __author__ = 'rfaulkner'
| [
"rfaulkner@wikimedia.org"
] | rfaulkner@wikimedia.org |
16e2293557987bc61281e413c7f76f928c35e362 | 8e3b02a45afe51980cdb9081178b8d22192c9000 | /purbeurre_web/dbmanage/tests/mockapi.py | 21e82501784074d34e02eb3bf29459dd1810d566 | [] | no_license | pythonmentor/PurBeurre-p11 | 6af863c78bfc1011aa49ea6b7373f64f6cc5630f | 8c6c1caeae13199001989c499327b272a81c082a | refs/heads/master | 2020-04-29T17:01:05.523609 | 2019-03-18T12:45:59 | 2019-03-18T12:45:59 | 176,283,413 | 0 | 0 | null | 2019-03-18T12:44:32 | 2019-03-18T12:44:32 | null | UTF-8 | Python | false | false | 253,690 | py | mock_api_return = {
"count": 1076,
"skip": 0,
"products": [
{
"ingredients_n_tags": [
"9",
"1-10"
],
"product_name_fr": "Nutella",
"nova_group_debug": " -- ingredients/en:vegetable-oil : 3 -- ingredients/en:milk-powder : 4",
"last_image_dates_tags": [
"2018-12-28",
"2018-12",
"2018"
],
"categories_debug_tags": [],
"ingredients_text_with_allergens": "Sucre, huile de palme, <span class=\"allergen\">noisettes</span> 13 %, <span class=\"allergen\">lait</span> écrémé en poudre 8,7 %, cacao maigre 7,4 %, émulsifiants : lécithines (<span class=\"allergen\">soja</span>), vanilline.",
"pnns_groups_1": "Sugary snacks",
"pnns_groups_2": "Sweets",
"additives_debug_tags": [],
"checkers_tags": [
"beniben"
],
"categories": "Petit-déjeuners, Produits à tartiner, Produits à tartiner sucrés, Pâtes à tartiner, Pâtes à tartiner au chocolat, Pâtes à tartiner aux noisettes et au cacao, Pâtes à tartiner aux noisettes",
"code": "3017620429484",
"countries_tags": [
"en:argentina",
"en:australia",
"en:belgium",
"en:canada",
"en:colombia",
"en:france",
"en:germany",
"en:ireland",
"en:italy",
"en:japan",
"en:luxembourg",
"en:mexico",
"en:netherlands",
"en:portugal",
"en:russia",
"en:saudi-arabia",
"en:south-africa",
"en:spain",
"en:sweden",
"en:switzerland",
"en:united-kingdom",
"en:united-states"
],
"nutrition_data_per": "100g",
"image_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.400.jpg",
"stores_debug_tags": [],
"nutrition_grades_tags": [
"e"
],
"additives_tags": [
"en:e322"
],
"traces_from_ingredients": "",
"last_editor": "openfoodfacts-contributors",
"allergens_from_user": "(de)Milch,Schalenfrüchte,Soja",
"amino_acids_tags": [],
"product_name_it_debug_tags": [],
"last_modified_t": 1546021997,
"additives_prev_original_tags": [
"en:e322"
],
"image_small_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.200.jpg",
"stores": "Intermarché,Carrefour,Colruyt",
"ingredients": [
{
"id": "en:sugar",
"rank": 1,
"text": "Sucre"
},
{
"rank": 2,
"id": "en:palm-oil",
"text": "huile de palme"
},
{
"rank": 3,
"id": "en:hazelnut",
"percent": "13",
"text": "_noisettes_"
},
{
"percent": "8.7",
"text": "_lait_ écrémé en poudre",
"rank": 4,
"id": "en:pasteurized-skim-milk"
},
{
"text": "cacao maigre",
"percent": "7.4",
"id": "fr:cacao-maigre",
"rank": 5
},
{
"text": "émulsifiants",
"id": "en:emulsifier",
"rank": 6
},
{
"text": "vanilline",
"rank": 7,
"id": "fr:vanilline"
},
{
"text": "lécithines",
"id": "en:lecithins"
},
{
"text": "_soja_",
"id": "en:soya"
}
],
"ingredients_text_de_debug_tags": [],
"nutrition_grade_fr": "e",
"product_name_fr_debug_tags": [],
"quantity_debug_tags": [],
"labels_prev_hierarchy": [
"en:gluten-free",
"en:green-dot"
],
"nutrition_score_debug": " -- energy 6 + sat-fat 10 + fr-sat-fat-for-fats 5 + sugars 10 + sodium 0 - fruits 0% 0 - fiber 0 - proteins 3 -- fsa 26 -- fr 26",
"nutrition_grades": "e",
"_keywords": [
"noisette",
"cocoa",
"and",
"aux",
"pate",
"cacao",
"tartiner",
"nougatcreme",
"ferrero",
"haselnusscreme",
"fruhstucke",
"et",
"susse",
"gruner",
"brotaufstriche",
"hazelnut",
"glutenfrei",
"brotaufstrich",
"nutella",
"punkt",
"au",
"spread"
],
"nova_groups_tags": [
"en:4-ultra-processed-food-and-drink-products"
],
"ingredients_text_de": "",
"ingredients_text_it_debug_tags": [],
"pnns_groups_2_tags": [
"sweets"
],
"countries_lc": "de",
"interface_version_modified": "20120622",
"amino_acids_prev_tags": [],
"allergens_from_ingredients": "noisettes, lait, soja",
"photographers_tags": [
"traaf",
"openfoodfacts-contributors",
"kiliweb",
"huzaifa",
"gavinlovesmandy5-gmail-com",
"bevyrose"
],
"expiration_date": "21/10/2017",
"last_image_t": 1546021997,
"serving_quantity": 15,
"origins_debug_tags": [],
"additives_prev_n": 1,
"manufacturing_places_tags": [],
"image_ingredients_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/ingredients_fr.149.100.jpg",
"last_check_dates_tags": [
"2018-11-11",
"2018-11",
"2018"
],
"last_modified_by": 'null',
"entry_dates_tags": [
"2013-04-15",
"2013-04",
"2013"
],
"traces_lc": "de",
"lc": "fr",
"image_front_small_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.200.jpg",
"new_additives_n": 1,
"nutrition_score_beverage": 0,
"correctors_tags": [
"traaf",
"tacite",
"romgn",
"tacinte",
"stephane",
"teolemon",
"aleene",
"scanbot",
"twoflower",
"sebleouf",
"kiliweb",
"beniben",
"jimnastick",
"risingdragoon",
"bleakpatch",
"openfoodfacts-contributors",
"jutest",
"nicolas42",
"moon-rabbit",
"yukafix",
"tometome",
"sil",
"openfoodfactsmx",
"domino33650",
"labeleat",
"jan101"
],
"nutrient_levels": {
"fat": "high",
"salt": "low",
"saturated-fat": "high",
"sugars": "high"
},
"image_ingredients_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/ingredients_fr.149.400.jpg",
"purchase_places_tags": [
"france",
"la-gorgue",
"deutschland"
],
"purchase_places": "France,La Gorgue,Deutschland",
"ingredients_text_fr_debug_tags": [],
"ingredients_original_tags": [
"en:sugar",
"en:palm-oil",
"en:hazelnut",
"en:pasteurized-skim-milk",
"fr:cacao-maigre",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"product_name_it": "Nutella",
"generic_name_de_debug_tags": [],
"traces_tags": [],
"misc_tags": [
"en:nutrition-no-fiber",
"en:nutrition-no-fruits-vegetables-nuts",
"en:nutrition-no-fiber-or-fruits-vegetables-nuts",
"en:nutriscore-computed"
],
"additives_n": 1,
"categories_prev_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"allergens_hierarchy": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"generic_name_it": "",
"update_key": "pnns13",
"created_t": 1366056898,
"unknown_ingredients_n": 0,
"expiration_date_debug_tags": [],
"nutriments": {
"energy_serving": "338",
"salt_value": 0.107,
"energy_100g": "2252",
"sugars": 56.3,
"saturated-fat_serving": 1.59,
"sugars_value": 56.3,
"salt_100g": 0.107,
"sodium_serving": 0.00632,
"energy": "2252",
"cocoa_label": "Cacao (minimum)",
"nutrition-score-fr": 26,
"saturated-fat_100g": 10.6,
"cocoa_100g": 7.3,
"proteins_unit": "g",
"nutrition-score-uk": 26,
"proteins_100g": 6.3,
"sodium_value": 0.0421259842519685,
"sugars_serving": 8.44,
"carbohydrates_serving": 8.62,
"sodium_100g": 0.0421259842519685,
"salt_serving": 0.016,
"nutrition-score-uk_100g": 26,
"proteins_serving": 0.945,
"carbohydrates_100g": 57.5,
"nova-group_serving": "4",
"salt": 0.107,
"carbohydrates": 57.5,
"fat_100g": 30.9,
"sugars_unit": "g",
"salt_unit": "g",
"carbohydrates_value": 57.5,
"proteins": 6.3,
"carbohydrates_unit": "g",
"proteins_value": 6.3,
"energy_unit": "kJ",
"energy_value": "2252",
"saturated-fat_unit": "g",
"nutrition-score-fr_100g": 26,
"sodium": 0.0421259842519685,
"sugars_100g": 56.3,
"cocoa_value": 7.3,
"saturated-fat_value": 10.6,
"saturated-fat": 10.6,
"fat_value": 30.9,
"sodium_unit": "g",
"fat_unit": "g",
"cocoa_unit": "g",
"nova-group": "4",
"cocoa_serving": 7.3,
"nova-group_100g": "4",
"fat_serving": 4.63,
"cocoa": 7.3,
"fat": 30.9
},
"packaging": "bocal,verre,couvercle,Glas,Kunststoff,plastique,Tarro,Plástico",
"nutrition_data_prepared_per_debug_tags": [],
"ingredients_from_palm_oil_tags": [
"huile-de-palme"
],
"minerals_prev_tags": [],
"fruits-vegetables-nuts_100g_estimate": 0,
"ingredients_text": "Sucre, huile de palme, _noisettes_ 13 %, _lait_ écrémé en poudre 8,7 %, cacao maigre 7,4 %, émulsifiants : lécithines (_soja_), vanilline.",
"traces_hierarchy": [],
"nutrition_data_prepared_per": "100g",
"product_name_debug_tags": [],
"labels_debug_tags": [],
"allergens_debug_tags": [],
"languages_codes": {
"fr": 6,
"it": 1,
"de": 4
},
"quality_tags": [],
"ingredients_text_with_allergens_de": "",
"ingredients_debug": [
"Sucre",
",",
'null',
'null',
'null',
" huile de palme",
",",
'null',
'null',
'null',
" _noisettes_ 13 %",
",",
'null',
'null',
'null',
" _lait_ écrémé en poudre 8",
",",
'null',
'null',
'null',
"7 %",
",",
'null',
'null',
'null',
" cacao maigre 7",
",",
'null',
'null',
'null',
"4 %",
",",
'null',
'null',
'null',
" émulsifiants ",
":",
":",
'null',
'null',
" lécithines ",
"(",
"(",
'null',
'null',
"_soja_)",
",",
'null',
'null',
'null',
" vanilline."
],
"nutrition_score_warning_no_fruits_vegetables_nuts": 1,
"countries_debug_tags": [],
"creator": "openfoodfacts-contributors",
"interface_version_created": "20130323.jqm",
"image_ingredients_small_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/ingredients_fr.149.200.jpg",
"ingredients_n": "9",
"product_name_de_debug_tags": [],
"informers_tags": [
"openfoodfacts-contributors",
"traaf",
"tacite",
"romgn",
"beniben",
"risingdragoon",
"nicolas42",
"sil"
],
"url": "https://fr.openfoodfacts.org/produit/3017620429484/nutella-ferrero",
"lang": "fr",
"purchase_places_debug_tags": [],
"pnns_groups_1_tags": [
"sugary-snacks"
],
"countries_beforescanbot": "France, en:belgium",
"serving_size_debug_tags": [],
"unique_scans_n": 2935,
"vitamins_tags": [],
"cities_tags": [],
"vitamins_prev_tags": [],
"image_front_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.400.jpg",
"labels_prev_tags": [
"en:gluten-free",
"en:green-dot"
],
"image_front_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.100.jpg",
"traces": "",
"labels_lc": "de",
"ingredients_text_debug_tags": [],
"nutrition_data_prepared": "",
"generic_name_fr_debug_tags": [],
"categories_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"link_debug_tags": [],
"categories_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"additives_old_tags": [
"en:e322"
],
"debug_param_sorted_langs": [
"fr",
"de",
"it"
],
"allergens_lc": "de",
"ingredients_that_may_be_from_palm_oil_n": 0,
"ingredients_text_debug": "Sucre, huile de palme, _noisettes_ 13 %, _lait_ écrémé en poudre 8,7 %, cacao maigre 7,4 %, émulsifiants : lécithines (_soja_), vanilline.",
"nucleotides_tags": [],
"emb_codes_orig": "",
"traces_from_user": "(de)",
"_id": "3017620429484",
"allergens_tags": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"brands_debug_tags": [],
"selected_images": {
"ingredients": {
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/ingredients_fr.149.400.jpg"
},
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/ingredients_fr.149.200.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/ingredients_fr.149.100.jpg"
}
},
"front": {
"thumb": {
"de": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_de.51.100.jpg",
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.100.jpg"
},
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.200.jpg",
"de": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_de.51.200.jpg"
},
"display": {
"de": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_de.51.400.jpg",
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.400.jpg"
}
},
"nutrition": {
"display": {
"de": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_de.191.400.jpg",
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_fr.106.400.jpg"
},
"thumb": {
"de": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_de.191.100.jpg",
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_fr.106.100.jpg"
},
"small": {
"de": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_de.191.200.jpg",
"fr": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_fr.106.200.jpg"
}
}
},
"ingredients_from_or_that_may_be_from_palm_oil_n": 1,
"product_quantity": 825,
"sortkey": 1546021997,
"packaging_debug_tags": [],
"languages_tags": [
"en:french",
"en:german",
"en:italian",
"en:3",
"en:multilingual"
],
"stores_tags": [
"intermarche",
"carrefour",
"colruyt"
],
"ingredients_from_palm_oil_n": 1,
"generic_name_de": "Nuß-Nougat-Krem",
"emb_codes": "",
"quantity": "825 g",
"checked": "on",
"generic_name_fr": "Pâte à tartiner aux noisettes et au cacao",
"last_checker": "beniben",
"minerals_tags": [],
"brands_tags": [
"ferrero",
"nutella"
],
"ingredients_text_with_allergens_fr": "Sucre, huile de palme, <span class=\"allergen\">noisettes</span> 13 %, <span class=\"allergen\">lait</span> écrémé en poudre 8,7 %, cacao maigre 7,4 %, émulsifiants : lécithines (<span class=\"allergen\">soja</span>), vanilline.",
"image_nutrition_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_fr.106.100.jpg",
"languages": {
"en:french": 6,
"en:italian": 1,
"en:german": 4
},
"labels_tags": [
"en:gluten-free",
"en:green-dot"
],
"scans_n": 3406,
"image_nutrition_small_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_fr.106.200.jpg",
"last_checked_t": 1541938818,
"product_name_de": "Nutella",
"labels": "Glutenfrei,Grüner Punkt",
"ingredients_text_with_allergens_it": "",
"nucleotides_prev_tags": [],
"serving_size": "15g",
"ingredients_hierarchy": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"en:pasteurized-skim-milk",
"en:milk-powder",
"fr:cacao-maigre",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"additives_original_tags": [
"en:e322"
],
"origins": "",
"nova_group_tags": [
"not-applicable"
],
"additives_old_n": 1,
"languages_hierarchy": [
"en:french",
"en:german",
"en:italian"
],
"lang_debug_tags": [],
"additives_prev_tags": [
"en:e322"
],
"unknown_nutrients_tags": [],
"generic_name_it_debug_tags": [],
"countries": "Argentinien,Australien,Belgien,Kanada,Kolumbien,Frankreich,Deutschland,Irland,Italien,Japan,Luxemburg,Mexiko,Niederlande,Portugal,Russland,Saudi-Arabien,Südafrika,Spanien,Schweden,Schweiz,Vereinigtes Königreich,Vereinigte Staaten von Amerika",
"emb_codes_20141016": "",
"no_nutrition_data": "",
"manufacturing_places_debug_tags": [],
"nutrition_data": "on",
"images": {
"1": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 2322,
"h": 4128
}
},
"uploaded_t": 1384310363,
"uploader": "traaf"
},
"2": {
"uploader": "traaf",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 2322,
"h": 4128
}
},
"uploaded_t": 1384310369
},
"3": {
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 4128,
"w": 2322
}
},
"uploaded_t": 1384310372,
"uploader": "traaf"
},
"4": {
"uploaded_t": 1384310377,
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 2322,
"h": 4128
}
},
"uploader": "traaf"
},
"6": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 75,
"w": 100
},
"400": {
"h": 300,
"w": 400
},
"full": {
"h": 1500,
"w": 2000
}
},
"uploaded_t": "1454138226"
},
"8": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 1125,
"h": 2000
}
},
"uploaded_t": "1457273346"
},
"11": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 2000,
"h": 3561
}
},
"uploaded_t": "1457633585",
"uploader": "openfoodfacts-contributors"
},
"12": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2448,
"h": 3264
}
},
"uploaded_t": "1457782534"
},
"14": {
"uploaded_t": "1462963594",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 2000,
"w": 1125
}
},
"uploader": "openfoodfacts-contributors"
},
"17": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 1125,
"h": 2000
}
},
"uploaded_t": "1485969265"
},
"22": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1490774540",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2448,
"h": 3264
}
}
},
"23": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1491241437",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 2000,
"w": 1125
}
}
},
"24": {
"uploaded_t": "1491241469",
"sizes": {
"100": {
"h": 56,
"w": 100
},
"400": {
"h": 225,
"w": 400
},
"full": {
"h": 1125,
"w": 2000
}
},
"uploader": "openfoodfacts-contributors"
},
"25": {
"uploaded_t": "1491241488",
"sizes": {
"100": {
"h": 56,
"w": 100
},
"400": {
"w": 400,
"h": 225
},
"full": {
"h": 1125,
"w": 2000
}
},
"uploader": "openfoodfacts-contributors"
},
"26": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1491241500",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 1125,
"h": 2000
}
}
},
"37": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 100,
"h": 100
},
"400": {
"h": 400,
"w": 400
},
"full": {
"w": 2000,
"h": 2000
}
},
"uploaded_t": "1502519149"
},
"44": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 1500,
"h": 2000
}
},
"uploaded_t": "1505972873"
},
"45": {
"uploaded_t": "1511895483",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 2000,
"w": 1125
}
},
"uploader": "openfoodfacts-contributors"
},
"47": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1513020941",
"sizes": {
"100": {
"h": 75,
"w": 100
},
"400": {
"w": 400,
"h": 300
},
"full": {
"h": 1500,
"w": 2000
}
}
},
"48": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 2000,
"w": 1125
}
},
"uploaded_t": "1514304572"
},
"49": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 100,
"h": 75
},
"400": {
"w": 400,
"h": 300
},
"full": {
"h": 1500,
"w": 2000
}
},
"uploaded_t": "1515586682"
},
"51": {
"uploaded_t": "1516641326",
"sizes": {
"100": {
"h": 85,
"w": 100
},
"400": {
"h": 342,
"w": 400
},
"full": {
"h": 1360,
"w": 1591
}
},
"uploader": "kiliweb"
},
"52": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1516724011",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 1500,
"h": 2000
}
}
},
"54": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1517655391",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 2000,
"h": 2666
}
}
},
"56": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 1200,
"w": 901
}
},
"uploaded_t": "1519212536",
"uploader": "kiliweb"
},
"57": {
"uploader": "kiliweb",
"uploaded_t": "1519251497",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 901,
"h": 1200
}
}
},
"63": {
"uploaded_t": "1521562025",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 901,
"h": 1200
}
},
"uploader": "kiliweb"
},
"64": {
"uploader": "kiliweb",
"sizes": {
"100": {
"w": 28,
"h": 100
},
"400": {
"w": 110,
"h": 400
},
"full": {
"w": 331,
"h": 1200
}
},
"uploaded_t": "1521562027"
},
"66": {
"sizes": {
"100": {
"w": 100,
"h": 56
},
"400": {
"w": 400,
"h": 225
},
"full": {
"h": 1125,
"w": 2000
}
},
"uploaded_t": "1525120673",
"uploader": "openfoodfacts-contributors"
},
"75": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 56,
"w": 100
},
"400": {
"h": 225,
"w": 400
},
"full": {
"w": 2484,
"h": 1396
}
},
"uploaded_t": 1536950355
},
"77": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1537636288,
"sizes": {
"100": {
"w": 100,
"h": 75
},
"400": {
"h": 299,
"w": 400
},
"full": {
"h": 1936,
"w": 2592
}
}
},
"78": {
"uploaded_t": 1539283573,
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 3328,
"w": 2496
}
},
"uploader": "openfoodfacts-contributors"
},
"79": {
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 3264,
"w": 2448
}
},
"uploaded_t": 1540026556,
"uploader": "openfoodfacts-contributors"
},
"80": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1542697537,
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 3024,
"h": 4032
}
}
},
"81": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1542819968,
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 960,
"h": 1280
}
}
},
"82": {
"uploader": "gavinlovesmandy5-gmail-com",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 4032,
"w": 3024
}
},
"uploaded_t": 1542942677
},
"83": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 3264,
"w": 2448
}
},
"uploaded_t": 1543074039,
"uploader": "openfoodfacts-contributors"
},
"84": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 2448,
"h": 3264
}
},
"uploaded_t": 1543404482
},
"85": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2448,
"h": 3264
}
},
"uploaded_t": 1544028230
},
"86": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 3024,
"h": 4032
}
},
"uploaded_t": 1544787102
},
"87": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1545310737,
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 4032,
"w": 3024
}
}
},
"88": {
"sizes": {
"100": {
"w": 100,
"h": 75
},
"400": {
"w": 400,
"h": 300
},
"full": {
"w": 3264,
"h": 2448
}
},
"uploaded_t": 1545857392,
"uploader": "bevyrose"
},
"89": {
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 3264,
"w": 2448
}
},
"uploaded_t": 1545857404,
"uploader": "bevyrose"
},
"90": {
"uploader": "bevyrose",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2448,
"h": 3264
}
},
"uploaded_t": 1545857416
},
"91": {
"uploader": "bevyrose",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2448,
"h": 3264
}
},
"uploaded_t": 1545857439
},
"92": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 3264,
"w": 2448
}
},
"uploaded_t": 1546021996
},
"ingredients_fr": {
"geometry": "0x0-0-0",
"angle": "90",
"normalize": "false",
"y1": "0",
"imgid": "64",
"white_magic": "false",
"sizes": {
"100": {
"h": 28,
"w": 100
},
"200": {
"h": 55,
"w": 200
},
"400": {
"h": 110,
"w": 400
},
"full": {
"w": 1200,
"h": 331
}
},
"rev": "149",
"y2": "0",
"x1": "0",
"x2": "0"
},
"front_de": {
"y1": "65.23333740234375",
"normalize": "false",
"angle": "0",
"imgid": "8",
"geometry": "830x1230-196-326",
"rev": "51",
"x2": "205.25",
"x1": "39.25",
"y2": "311.23333740234375",
"white_magic": "false",
"sizes": {
"100": {
"h": 100,
"w": 67
},
"200": {
"h": 200,
"w": 135
},
"400": {
"w": 270,
"h": 400
},
"full": {
"w": 830,
"h": 1230
}
}
},
"nutrition_fr": {
"x2": 'null',
"x1": 'null',
"y2": 'null',
"rev": "106",
"sizes": {
"100": {
"h": 85,
"w": 100
},
"200": {
"w": 200,
"h": 171
},
"400": {
"w": 400,
"h": 342
},
"full": {
"h": 1360,
"w": 1591
}
},
"white_magic": "0",
"imgid": "51",
"y1": 'null',
"normalize": "0",
"angle": 'null',
"geometry": "0x0-0-0"
},
"front_fr": {
"y2": "391.03334045410156",
"x2": "271.75",
"x1": "26.75",
"rev": "174",
"sizes": {
"100": {
"w": "66",
"h": "100"
},
"200": {
"h": 200,
"w": 132
},
"400": {
"h": 400,
"w": 265
},
"full": {
"h": 1113,
"w": 736
}
},
"white_magic": "false",
"imgid": "56",
"angle": "0",
"normalize": "false",
"y1": "20.033340454101562",
"geometry": "736x1113-80-60"
},
"front": {
"white_magic": 'null',
"geometry": "0x0--10--10",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"200": {
"w": 113,
"h": 200
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 2322,
"h": 4128
}
},
"normalize": "checked",
"rev": "8",
"imgid": "2"
},
"ingredients": {
"geometry": "1723x1104-299-1331",
"sizes": {
"100": {
"w": 100,
"h": 64
},
"200": {
"w": 200,
"h": 128
},
"400": {
"h": 256,
"w": 400
},
"full": {
"h": 1104,
"w": 1723
}
},
"white_magic": 'null',
"imgid": "1",
"normalize": "checked",
"rev": "10"
},
"nutrition": {
"white_magic": 'null',
"sizes": {
"100": {
"h": 81,
"w": 100
},
"200": {
"w": 200,
"h": 162
},
"400": {
"w": 400,
"h": 324
},
"full": {
"h": 1104,
"w": 1362
}
},
"geometry": "1362x1104-485-1403",
"rev": "11",
"normalize": "checked",
"imgid": "3"
},
"nutrition_de": {
"white_magic": "false",
"sizes": {
"100": {
"h": 99,
"w": 100
},
"200": {
"w": 200,
"h": 199
},
"400": {
"w": 400,
"h": 397
},
"full": {
"w": 1022,
"h": 1015
}
},
"rev": "191",
"y2": "252.88311767578125",
"x1": "41.566650390625",
"x2": "156.566650390625",
"geometry": "1022x1015-369-1236",
"normalize": "false",
"angle": "0",
"y1": "138.88311767578125",
"imgid": "11"
}
},
"image_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/front_fr.174.100.jpg",
"emb_codes_debug_tags": [],
"max_imgid": "92",
"editors": [
"",
"traaf",
"tacinte",
"romgn",
"tacite",
"stephane",
"nicolasleger"
],
"rev": 193,
"ingredients_text_fr": "Sucre, huile de palme, _noisettes_ 13 %, _lait_ écrémé en poudre 8,7 %, cacao maigre 7,4 %, émulsifiants : lécithines (_soja_), vanilline.",
"categories_lc": "fr",
"complete": 1,
"id": "3017620429484",
"allergens": "en:milk,en:nuts,en:soybeans",
"image_nutrition_url": "https://static.openfoodfacts.org/images/products/301/762/042/9484/nutrition_fr.106.400.jpg",
"editors_tags": [
"stephane",
"romgn",
"openfoodfacts-contributors",
"nicolas42",
"tacinte",
"jutest",
"domino33650",
"bevyrose",
"moon-rabbit",
"aleene",
"nicolasleger",
"scanbot",
"tacite",
"labeleat",
"beniben",
"tometome",
"risingdragoon",
"teolemon",
"zoneblockscommunity",
"jimnastick",
"sebleouf",
"huzaifa",
"sil",
"jan101",
"traaf",
"date-limite-app",
"gavinlovesmandy5-gmail-com",
"kiliweb",
"openfoodfactsmx",
"yukafix",
"twoflower",
"bleakpatch"
],
"nutrition_data_per_debug_tags": [],
"states_hierarchy": [
"en:checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"labels_hierarchy": [
"en:gluten-free",
"en:green-dot"
],
"last_edit_dates_tags": [
"2018-12-28",
"2018-12",
"2018"
],
"emb_codes_tags": [],
"codes_tags": [
"code-13",
"3017620429484",
"301762042948x",
"30176204294xx",
"3017620429xxx",
"301762042xxxx",
"30176204xxxxx",
"3017620xxxxxx",
"301762xxxxxxx",
"30176xxxxxxxx",
"3017xxxxxxxxx",
"301xxxxxxxxxx",
"30xxxxxxxxxxx",
"3xxxxxxxxxxxx"
],
"origins_tags": [],
"product_name": "Nutella",
"nova_group": "4",
"manufacturing_places": "",
"states": "en:checked, en:complete, en:nutrition-facts-completed, en:ingredients-completed, en:expiration-date-completed, en:packaging-code-to-be-completed, en:characteristics-completed, en:categories-completed, en:brands-completed, en:packaging-completed, en:quantity-completed, en:product-name-completed, en:photos-validated, en:photos-uploaded",
"traces_debug_tags": [],
"ingredients_that_may_be_from_palm_oil_tags": [],
"nutrition_score_warning_no_fiber": 1,
"ingredients_tags": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"en:pasteurized-skim-milk",
"en:milk-powder",
"fr:cacao-maigre",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"ingredients_text_it": "",
"generic_name": "Pâte à tartiner aux noisettes et au cacao",
"completed_t": 1541938818,
"link": "https://www.nutella.com/de/de",
"nova_groups": "4",
"categories_prev_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"brands": "Ferrero,Nutella",
"nutrient_levels_tags": [
"en:fat-in-high-quantity",
"en:saturated-fat-in-high-quantity",
"en:sugars-in-high-quantity",
"en:salt-in-low-quantity"
],
"states_tags": [
"en:checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"countries_hierarchy": [
"en:argentina",
"en:australia",
"en:belgium",
"en:canada",
"en:colombia",
"en:france",
"en:germany",
"en:ireland",
"en:italy",
"en:japan",
"en:luxembourg",
"en:mexico",
"en:netherlands",
"en:portugal",
"en:russia",
"en:saudi-arabia",
"en:south-africa",
"en:spain",
"en:sweden",
"en:switzerland",
"en:united-kingdom",
"en:united-states"
],
"ingredients_ids_debug": [
"sucre",
"huile-de-palme",
"noisettes-13",
"lait-ecreme-en-poudre-8",
"7",
"cacao-maigre-7",
"4",
"emulsifiants",
"lecithines",
"soja",
"vanilline"
],
"packaging_tags": [
"bocal",
"verre",
"couvercle",
"glas",
"kunststoff",
"plastique",
"tarro",
"plastico"
]
},
{
"created_t": 1360583367,
"unknown_ingredients_n": 0,
"packaging": "Bocal,pot,verre",
"expiration_date_debug_tags": [],
"nutriments": {
"sugars_100g": 56.8,
"sodium": 0.0448818897637795,
"nutrition-score-fr_100g": 26,
"saturated-fat_unit": "g",
"energy_value": "2278",
"energy_unit": "kJ",
"proteins_value": "6",
"carbohydrates_unit": "g",
"proteins": "6",
"carbohydrates_value": "57.6",
"salt_unit": "g",
"sugars_unit": "g",
"fat_100g": 31.6,
"fat": 31.6,
"fat_serving": 4.74,
"nova-group_100g": "4",
"nova-group": "4",
"fat_unit": "g",
"fat_value": "31.6",
"sodium_unit": "g",
"saturated-fat": "11",
"saturated-fat_value": "11",
"proteins_100g": "6",
"nutrition-score-uk": 26,
"proteins_unit": "g",
"saturated-fat_100g": "11",
"nutrition-score-fr": 26,
"energy": "2278",
"sodium_serving": 0.00673,
"salt_100g": 0.114,
"sugars_value": "56.8",
"sugars": 56.8,
"saturated-fat_serving": 1.65,
"energy_100g": "2278",
"salt_value": "0.114",
"energy_serving": "342",
"salt": 0.114,
"carbohydrates": 57.6,
"nova-group_serving": "4",
"salt_serving": 0.0171,
"carbohydrates_100g": 57.6,
"nutrition-score-uk_100g": 26,
"proteins_serving": 0.9,
"sodium_100g": 0.0448818897637795,
"carbohydrates_serving": 8.64,
"sugars_serving": 8.52,
"sodium_value": "0.04488188976377953"
},
"nutrition_data_prepared_per_debug_tags": [],
"ingredients_from_palm_oil_tags": [
"huile-de-palme"
],
"minerals_prev_tags": [],
"ingredients_text": "Sucre, huile de palme, _noisettes_ 13%, cacao maigre 7,4%, _lait_ écrémé en poudre 6,6%, _lactosérum_ en poudre, émulsifiants : lécithines (_soja_), vanilline.",
"fruits-vegetables-nuts_100g_estimate": 0,
"traces_hierarchy": [],
"new_additives_n": 1,
"image_ingredients_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/ingredients_fr.22.400.jpg",
"purchase_places_tags": [
"france"
],
"nutrient_levels": {
"saturated-fat": "high",
"sugars": "high",
"fat": "high",
"salt": "low"
},
"nutrition_score_beverage": 0,
"correctors_tags": [
"fabricetheytaz",
"scanbot",
"tacite",
"nicolasleger",
"date-limite-app",
"greyxor",
"teolemon",
"twoflower",
"the-redburn",
"sebleouf"
],
"purchase_places": "France",
"ingredients_original_tags": [
"en:sugar",
"en:palm-oil",
"en:hazelnut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:whey-powder",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"ingredients_text_fr_debug_tags": [],
"additives_n": 1,
"misc_tags": [
"en:nutrition-no-fiber",
"en:nutrition-no-fruits-vegetables-nuts",
"en:nutrition-no-fiber-or-fruits-vegetables-nuts",
"en:nutriscore-computed"
],
"traces_tags": [],
"generic_name_de_debug_tags": [],
"categories_prev_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"allergens_hierarchy": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"update_key": "pnns12",
"informers_tags": [
"openfoodfacts-contributors",
"fabricetheytaz",
"tacite",
"jeanbono",
"the-redburn"
],
"lang": "fr",
"url": "https://fr.openfoodfacts.org/produit/3017624047813/nutella",
"purchase_places_debug_tags": [],
"pnns_groups_1_tags": [
"sugary-snacks"
],
"countries_beforescanbot": "France,Switzerland",
"serving_size_debug_tags": [],
"nutrition_data_prepared_per": "100g",
"allergens_debug_tags": [],
"product_name_debug_tags": [],
"labels_debug_tags": [],
"languages_codes": {
"de": 2,
"fr": 6
},
"quality_tags": [],
"nutrition_score_warning_no_fruits_vegetables_nuts": 1,
"countries_debug_tags": [],
"ingredients_debug": [
"Sucre",
",",
null,
null,
null,
" huile de palme",
",",
null,
null,
null,
" _noisettes_ 13%",
",",
null,
null,
null,
" cacao maigre 7",
",",
null,
null,
null,
"4%",
",",
null,
null,
null,
" _lait_ écrémé en poudre 6",
",",
null,
null,
null,
"6%",
",",
null,
null,
null,
" _lactosérum_ en poudre",
",",
null,
null,
null,
" émulsifiants ",
":",
":",
null,
null,
" lécithines ",
"(",
"(",
null,
null,
"_soja_)",
",",
null,
null,
null,
" vanilline."
],
"ingredients_text_with_allergens_de": "Test",
"creator": "openfoodfacts-contributors",
"interface_version_created": "20120622",
"product_name_de_debug_tags": [],
"image_ingredients_small_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/ingredients_fr.22.200.jpg",
"ingredients_n": "10",
"nutrition_grades_tags": [
"e"
],
"stores_debug_tags": [],
"last_editor": "openfoodfacts-contributors",
"additives_tags": [
"en:e322"
],
"traces_from_ingredients": "",
"amino_acids_tags": [],
"allergens_from_user": "(fr)Lait,Fruits à coque,Soja",
"last_modified_t": 1544179572,
"additives_prev_original_tags": [
"en:e322"
],
"stores": "",
"image_small_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.200.jpg",
"ingredients_text_de_debug_tags": [],
"ingredients": [
{
"rank": 1,
"id": "en:sugar",
"text": "Sucre"
},
{
"id": "en:palm-oil",
"rank": 2,
"text": "huile de palme"
},
{
"id": "en:hazelnut",
"rank": 3,
"text": "_noisettes_",
"percent": "13"
},
{
"percent": "7.4",
"text": "cacao maigre",
"rank": 4,
"id": "fr:cacao-maigre"
},
{
"text": "_lait_ écrémé en poudre",
"percent": "6.6",
"id": "en:pasteurized-skim-milk",
"rank": 5
},
{
"text": "_lactosérum_ en poudre",
"id": "en:whey-powder",
"rank": 6
},
{
"rank": 7,
"id": "en:emulsifier",
"text": "émulsifiants"
},
{
"rank": 8,
"id": "fr:vanilline",
"text": "vanilline"
},
{
"text": "lécithines",
"id": "en:lecithins"
},
{
"id": "en:soya",
"text": "_soja_"
}
],
"product_name_fr": "Nutella",
"ingredients_n_tags": [
"10",
"1-10"
],
"nova_group_debug": " -- ingredients/en:sugar : 3 -- ingredients/en:whey : 4",
"last_image_dates_tags": [
"2018-12-07",
"2018-12",
"2018"
],
"additives_debug_tags": [],
"pnns_groups_2": "Sweets",
"pnns_groups_1": "Sugary snacks",
"categories_debug_tags": [],
"ingredients_text_with_allergens": "Sucre, huile de palme, <span class=\"allergen\">noisettes</span> 13%, cacao maigre 7,4%, <span class=\"allergen\">lait</span> écrémé en poudre 6,6%, <span class=\"allergen\">lactosérum</span> en poudre, émulsifiants : lécithines (<span class=\"allergen\">soja</span>), vanilline.",
"code": "3017624047813",
"categories": "Petit-déjeuners, Produits à tartiner, Produits à tartiner sucrés, Pâtes à tartiner, Pâtes à tartiner au chocolat, Pâtes à tartiner aux noisettes, Pâtes à tartiner aux noisettes et au cacao",
"checkers_tags": [],
"countries_tags": [
"en:algeria",
"en:belgium",
"en:canada",
"en:france",
"en:germany",
"en:italy",
"en:mali",
"en:martinique",
"en:spain",
"en:switzerland"
],
"nutrition_data_per": "100g",
"image_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.400.jpg",
"photographers_tags": [
"openfoodfacts-contributors",
"teolemon",
"jeanbono"
],
"expiration_date": "",
"allergens_from_ingredients": "noisettes, lait, lactosérum, soja",
"origins_debug_tags": [],
"additives_prev_n": 1,
"serving_quantity": 15,
"last_image_t": 1544179572,
"manufacturing_places_tags": [],
"image_ingredients_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/ingredients_fr.22.100.jpg",
"last_modified_by": null,
"image_front_small_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.200.jpg",
"lc": "fr",
"entry_dates_tags": [
"2013-02-11",
"2013-02",
"2013"
],
"nutrition_grade_fr": "e",
"product_name_fr_debug_tags": [],
"quantity_debug_tags": [],
"nutrition_score_debug": " -- energy 6 + sat-fat 10 + fr-sat-fat-for-fats 5 + sugars 10 + sodium 0 - fruits 0% 0 - fiber 0 - proteins 3 -- fsa 26 -- fr 26",
"labels_prev_hierarchy": [],
"nova_groups_tags": [
"en:4-ultra-processed-food-and-drink-products"
],
"_keywords": [
"et",
"pate",
"chocolat",
"sucre",
"petit-dejeuner",
"nutella",
"tartiner",
"produit",
"cacao",
"au",
"aux",
"noisette",
"ferrero"
],
"nutrition_grades": "e",
"pnns_groups_2_tags": [
"sweets"
],
"ingredients_text_de": "Test",
"interface_version_modified": "20120622",
"countries_lc": "fr",
"amino_acids_prev_tags": [],
"emb_codes_debug_tags": [],
"max_imgid": "39",
"editors": [
"",
"jeanbono",
"teolemon",
"tacite",
"scanbot",
"fabricetheytaz",
"nicolasleger"
],
"rev": 74,
"id": "3017624047813",
"complete": 1,
"categories_lc": "fr",
"ingredients_text_fr": "Sucre, huile de palme, _noisettes_ 13%, cacao maigre 7,4%, _lait_ écrémé en poudre 6,6%, _lactosérum_ en poudre, émulsifiants : lécithines (_soja_), vanilline.",
"image_nutrition_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/nutrition_fr.23.400.jpg",
"allergens": "en:milk,en:nuts,en:soybeans",
"languages_hierarchy": [
"en:french",
"en:german"
],
"lang_debug_tags": [],
"additives_prev_tags": [
"en:e322"
],
"unknown_nutrients_tags": [],
"emb_codes_20141016": "",
"countries": "Algérie, Belgique, Canada, France, Allemagne, Italie, Mali, Martinique, Espagne, Suisse",
"manufacturing_places_debug_tags": [],
"no_nutrition_data": "",
"image_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.100.jpg",
"images": {
"1": {
"sizes": {
"100": {
"w": 100,
"h": 75
},
"400": {
"h": 300,
"w": 400
},
"full": {
"w": 3264,
"h": 2448
}
},
"uploaded_t": 1360583368,
"uploader": "openfoodfacts-contributors"
},
"2": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 299
},
"full": {
"w": 1936,
"h": 2592
}
},
"uploaded_t": 1387567194
},
"3": {
"uploader": "teolemon",
"uploaded_t": 1408104480,
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 3264,
"w": 2448
}
}
},
"4": {
"uploader": "teolemon",
"uploaded_t": 1408104498,
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 3264,
"w": 2448
}
}
},
"5": {
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 3264,
"w": 2448
}
},
"uploaded_t": 1408104512,
"uploader": "teolemon"
},
"6": {
"uploader": "teolemon",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 3264,
"w": 2448
}
},
"uploaded_t": 1408104530
},
"7": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 2000,
"w": 1125
}
},
"uploaded_t": 1413733200,
"uploader": "openfoodfacts-contributors"
},
"8": {
"uploaded_t": 1422901187,
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 1500,
"h": 2000
}
},
"uploader": "jeanbono"
},
"9": {
"uploader": "jeanbono",
"uploaded_t": 1422901207,
"sizes": {
"100": {
"w": 100,
"h": 75
},
"400": {
"w": 400,
"h": 300
},
"full": {
"w": 2000,
"h": 1500
}
}
},
"10": {
"uploaded_t": 1422901220,
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 1500,
"h": 2000
}
},
"uploader": "jeanbono"
},
"16": {
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 2000,
"w": 1125
}
},
"uploaded_t": "1473952139",
"uploader": "openfoodfacts-contributors"
},
"32": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1540658899,
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 2576,
"w": 1932
}
}
},
"33": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 1932,
"h": 2576
}
},
"uploaded_t": 1540658924
},
"34": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1540658936,
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 1932,
"h": 2576
}
}
},
"35": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 1932,
"h": 2576
}
},
"uploaded_t": 1540658947,
"uploader": "openfoodfacts-contributors"
},
"36": {
"uploaded_t": 1540658960,
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 3024,
"h": 4032
}
},
"uploader": "openfoodfacts-contributors"
},
"37": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 4032,
"w": 3024
}
},
"uploaded_t": 1542512064,
"uploader": "openfoodfacts-contributors"
},
"38": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 3024,
"h": 4032
}
},
"uploaded_t": 1542512816
},
"39": {
"uploaded_t": 1544179571,
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 3024,
"h": 4032
}
},
"uploader": "openfoodfacts-contributors"
},
"front_de": {
"y2": "0",
"x1": "0",
"x2": "0",
"rev": "43",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"200": {
"w": 150,
"h": 200
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 2448,
"h": 3264
}
},
"white_magic": "false",
"imgid": "3",
"angle": "0",
"normalize": "false",
"y1": "0",
"geometry": "0x0-0-0"
},
"ingredients_fr": {
"white_magic": "false",
"sizes": {
"100": {
"w": 100,
"h": 56
},
"200": {
"w": 200,
"h": 112
},
"400": {
"h": 225,
"w": 400
},
"full": {
"h": 340,
"w": 605
}
},
"geometry": "605x340-532-1084",
"normalize": "false",
"rev": "22",
"imgid": "8"
},
"nutrition": {
"normalize": "false",
"rev": "23",
"imgid": "9",
"white_magic": "false",
"sizes": {
"100": {
"w": 100,
"h": 81
},
"200": {
"w": 200,
"h": 162
},
"400": {
"w": 400,
"h": 324
},
"full": {
"h": 530,
"w": 655
}
},
"geometry": "655x530-527-839"
},
"front": {
"imgid": "10",
"normalize": "false",
"rev": "21",
"geometry": "980x1495-227-299",
"sizes": {
"100": {
"w": 66,
"h": 100
},
"200": {
"h": 200,
"w": 131
},
"400": {
"w": 262,
"h": 400
},
"full": {
"w": 980,
"h": 1495
}
},
"white_magic": "false"
},
"front_fr": {
"imgid": "3",
"y1": "0",
"angle": "0",
"normalize": "false",
"geometry": "0x0-0-0",
"x2": "0",
"x1": "0",
"y2": "0",
"rev": "42",
"sizes": {
"100": {
"h": "100",
"w": "75"
},
"200": {
"w": 150,
"h": 200
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 3264,
"w": 2448
}
},
"white_magic": "false"
},
"ingredients": {
"geometry": "605x340-532-1084",
"sizes": {
"100": {
"h": 56,
"w": 100
},
"200": {
"h": 112,
"w": 200
},
"400": {
"w": 400,
"h": 225
},
"full": {
"w": 605,
"h": 340
}
},
"white_magic": "false",
"imgid": "8",
"normalize": "false",
"rev": "22"
},
"nutrition_fr": {
"rev": "23",
"normalize": "false",
"imgid": "9",
"white_magic": "false",
"geometry": "655x530-527-839",
"sizes": {
"100": {
"h": 81,
"w": 100
},
"200": {
"h": 162,
"w": 200
},
"400": {
"h": 324,
"w": 400
},
"full": {
"w": 655,
"h": 530
}
}
}
},
"nutrition_data": "on",
"generic_name": "Pâte à tartiner aux noisettes et au cacao",
"nova_groups": "4",
"link": "http://www.ferrero.fr/nutella",
"completed_t": 1413757285,
"brands": "Nutella,Ferrero",
"categories_prev_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"ingredients_ids_debug": [
"sucre",
"huile-de-palme",
"noisettes-13",
"cacao-maigre-7",
"4",
"lait-ecreme-en-poudre-6",
"6",
"lactoserum-en-poudre",
"emulsifiants",
"lecithines",
"soja",
"vanilline"
],
"countries_hierarchy": [
"en:algeria",
"en:belgium",
"en:canada",
"en:france",
"en:germany",
"en:italy",
"en:mali",
"en:martinique",
"en:spain",
"en:switzerland"
],
"packaging_tags": [
"bocal",
"pot",
"verre"
],
"nutrient_levels_tags": [
"en:fat-in-high-quantity",
"en:saturated-fat-in-high-quantity",
"en:sugars-in-high-quantity",
"en:salt-in-low-quantity"
],
"states_tags": [
"en:to-be-checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-to-be-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"states_hierarchy": [
"en:to-be-checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-to-be-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"nutrition_data_per_debug_tags": [],
"editors_tags": [
"scanbot",
"the-redburn",
"tacite",
"openfoodfacts-contributors",
"greyxor",
"nicolasleger",
"teolemon",
"jeanbono",
"sebleouf",
"fabricetheytaz",
"twoflower",
"date-limite-app"
],
"emb_codes_tags": [],
"last_edit_dates_tags": [
"2018-12-07",
"2018-12",
"2018"
],
"labels_hierarchy": [],
"product_name": "Nutella",
"origins_tags": [],
"codes_tags": [
"code-13",
"3017624047813",
"301762404781x",
"30176240478xx",
"3017624047xxx",
"301762404xxxx",
"30176240xxxxx",
"3017624xxxxxx",
"301762xxxxxxx",
"30176xxxxxxxx",
"3017xxxxxxxxx",
"301xxxxxxxxxx",
"30xxxxxxxxxxx",
"3xxxxxxxxxxxx"
],
"nova_group": "4",
"states": "en:to-be-checked, en:complete, en:nutrition-facts-completed, en:ingredients-completed, en:expiration-date-to-be-completed, en:packaging-code-to-be-completed, en:characteristics-completed, en:categories-completed, en:brands-completed, en:packaging-completed, en:quantity-completed, en:product-name-completed, en:photos-validated, en:photos-uploaded",
"manufacturing_places": "",
"nutrition_score_warning_no_fiber": 1,
"ingredients_that_may_be_from_palm_oil_tags": [],
"traces_debug_tags": [],
"ingredients_tags": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:milk-powder",
"en:whey-powder",
"en:whey",
"en:milk",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"additives_old_tags": [
"en:e322"
],
"debug_param_sorted_langs": [
"fr",
"de"
],
"nucleotides_tags": [],
"ingredients_text_debug": "Sucre, huile de palme, _noisettes_ 13%, cacao maigre 7,4%, _lait_ écrémé en poudre 6,6%, _lactosérum_ en poudre, émulsifiants : lécithines (_soja_), vanilline.",
"ingredients_that_may_be_from_palm_oil_n": 0,
"traces_from_user": "(fr)",
"_id": "3017624047813",
"emb_codes_orig": "",
"allergens_tags": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"selected_images": {
"nutrition": {
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/nutrition_fr.23.400.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/nutrition_fr.23.100.jpg"
},
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/nutrition_fr.23.200.jpg"
}
},
"front": {
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.100.jpg",
"de": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_de.43.100.jpg"
},
"small": {
"de": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_de.43.200.jpg",
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.200.jpg"
},
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.400.jpg",
"de": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_de.43.400.jpg"
}
},
"ingredients": {
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/ingredients_fr.22.400.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/ingredients_fr.22.100.jpg"
},
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/404/7813/ingredients_fr.22.200.jpg"
}
}
},
"brands_debug_tags": [],
"ingredients_from_or_that_may_be_from_palm_oil_n": 1,
"sortkey": 1544179572,
"product_quantity": 780,
"languages_tags": [
"en:french",
"en:german",
"en:2",
"en:multilingual"
],
"packaging_debug_tags": [],
"stores_tags": [],
"ingredients_from_palm_oil_n": 1,
"vitamins_prev_tags": [],
"unique_scans_n": 799,
"cities_tags": [],
"vitamins_tags": [],
"labels_prev_tags": [],
"image_front_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.400.jpg",
"image_front_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/front_fr.42.100.jpg",
"labels_lc": "fr",
"traces": "",
"nutrition_data_prepared": "",
"link_debug_tags": [],
"generic_name_fr_debug_tags": [],
"categories_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:cocoa-and-hazelnuts-spreads",
"en:hazelnut-spreads"
],
"categories_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:cocoa-and-hazelnuts-spreads",
"en:hazelnut-spreads"
],
"scans_n": 860,
"labels_tags": [],
"languages": {
"en:german": 2,
"en:french": 6
},
"image_nutrition_small_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/nutrition_fr.23.200.jpg",
"labels": "",
"product_name_de": "",
"nucleotides_prev_tags": [],
"ingredients_hierarchy": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:milk-powder",
"en:whey-powder",
"en:whey",
"en:milk",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"serving_size": "15 g",
"origins": "",
"additives_original_tags": [
"en:e322"
],
"additives_old_n": 1,
"generic_name_de": "",
"emb_codes": "",
"generic_name_fr": "Pâte à tartiner aux noisettes et au cacao",
"quantity": "780 g",
"minerals_tags": [],
"brands_tags": [
"nutella",
"ferrero"
],
"ingredients_text_with_allergens_fr": "Sucre, huile de palme, <span class=\"allergen\">noisettes</span> 13%, cacao maigre 7,4%, <span class=\"allergen\">lait</span> écrémé en poudre 6,6%, <span class=\"allergen\">lactosérum</span> en poudre, émulsifiants : lécithines (<span class=\"allergen\">soja</span>), vanilline.",
"image_nutrition_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/404/7813/nutrition_fr.23.100.jpg"
},
{
"image_nutrition_small_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/nutrition_fr.16.200.jpg",
"scans_n": 525,
"labels_tags": [],
"languages": {
"en:german": 1,
"en:french": 6,
"en:english": 1
},
"product_name_de": "",
"labels": "",
"serving_size": "15 g",
"ingredients_hierarchy": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:milk-powder",
"en:whey-powder",
"en:whey",
"en:milk",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"nucleotides_prev_tags": [],
"additives_old_n": 1,
"additives_original_tags": [
"en:e322"
],
"origins": "",
"emb_codes": "",
"generic_name_de": "",
"minerals_tags": [],
"quantity": "1 kg",
"generic_name_fr": "Pâte à tartiner au cacao et aux noisettes",
"image_nutrition_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/nutrition_fr.16.100.jpg",
"ingredients_text_with_allergens_fr": "sucre, huile de palme, <span class=\"allergen\">noisettes</span> (13%), cacao maigre (7,4%), <span class=\"allergen\">lait</span> écrémé en poudre (6.6%), <span class=\"allergen\">lactosérum</span> en poudre, émulsifiants : lécithines (<span class=\"allergen\">soja</span>), vanilline",
"brands_tags": [
"ferrero",
"nutella"
],
"nucleotides_tags": [],
"ingredients_text_debug": "sucre, huile de palme, _noisettes_ (13%), cacao maigre (7,4%), _lait_ écrémé en poudre (6.6%), _lactosérum_ en poudre, émulsifiants : lécithines (_soja_), vanilline",
"ingredients_that_may_be_from_palm_oil_n": 0,
"debug_param_sorted_langs": [
"fr",
"de",
"en"
],
"additives_old_tags": [
"en:e322"
],
"allergens_tags": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"product_name_en_debug_tags": [],
"emb_codes_orig": "",
"_id": "3017620401473",
"sortkey": 1539636834,
"ingredients_from_or_that_may_be_from_palm_oil_n": 1,
"selected_images": {
"nutrition": {
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/nutrition_fr.16.200.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/nutrition_fr.16.100.jpg"
},
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/nutrition_fr.16.400.jpg"
}
},
"ingredients": {
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/ingredients_fr.15.200.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/ingredients_fr.15.100.jpg"
},
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/ingredients_fr.15.400.jpg"
}
},
"front": {
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.200.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.100.jpg"
},
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.400.jpg"
}
}
},
"brands_debug_tags": [],
"ingredients_from_palm_oil_n": 1,
"stores_tags": [],
"packaging_debug_tags": [],
"languages_tags": [
"en:german",
"en:french",
"en:english",
"en:3",
"en:multilingual"
],
"generic_name_en": "",
"image_front_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.400.jpg",
"labels_prev_tags": [],
"cities_tags": [],
"vitamins_tags": [],
"unique_scans_n": 466,
"vitamins_prev_tags": [],
"traces": "",
"labels_lc": "fr",
"image_front_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.100.jpg",
"generic_name_fr_debug_tags": [],
"categories_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:cocoa-and-hazelnuts-spreads",
"en:hazelnut-spreads"
],
"link_debug_tags": [],
"categories_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:cocoa-and-hazelnuts-spreads",
"en:hazelnut-spreads"
],
"completed_t": 1425039185,
"link": "http://www.ferrero.fr/nutella",
"nova_groups": "4",
"generic_name": "Pâte à tartiner au cacao et aux noisettes",
"brands": "Ferrero,Nutella",
"categories_prev_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"nutrient_levels_tags": [
"en:fat-in-high-quantity",
"en:saturated-fat-in-high-quantity",
"en:sugars-in-high-quantity",
"en:salt-in-low-quantity"
],
"states_tags": [
"en:to-be-checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-to-be-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"countries_hierarchy": [
"en:belgium",
"en:france",
"en:italy",
"en:morocco",
"en:switzerland",
"en:united-kingdom"
],
"packaging_tags": [
"bocal",
"verre",
"bocal",
"pot"
],
"ingredients_ids_debug": [
"sucre",
"huile-de-palme",
"noisettes",
"13",
"cacao-maigre",
"7",
"4",
"lait-ecreme-en-poudre",
"6-6",
"lactoserum-en-poudre",
"emulsifiants",
"lecithines",
"soja",
"vanilline"
],
"labels_hierarchy": [],
"last_edit_dates_tags": [
"2018-10-15",
"2018-10",
"2018"
],
"emb_codes_tags": [],
"sources": [
{
"fields": [
"serving_size",
"ingredients_text_en",
"ingredients_text_de"
],
"id": "openfood-ch",
"url": "https://www.openfood.ch/en/products/15703",
"import_t": 1486510853,
"images": [
"18",
"19",
"20",
"21",
"22",
"23",
"24",
"25",
"26",
"27",
"28",
"29",
"30",
"31",
"32",
"33",
"34",
"35",
"36",
"37"
]
}
],
"editors_tags": [
"tacinte",
"openfood-ch-import",
"remilk",
"scanbot",
"nicolasleger",
"zeldama",
"date-limite-app",
"openfoodfacts-contributors",
"yukafix",
"agamitsudo",
"tacite",
"kiliweb",
"teolemon",
"domdom26",
"sebleouf",
"zoneblockscommunity",
"xmiky974x"
],
"nutrition_data_per_debug_tags": [],
"states_hierarchy": [
"en:to-be-checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-to-be-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"nova_group": "4",
"codes_tags": [
"code-13",
"3017620401473",
"301762040147x",
"30176204014xx",
"3017620401xxx",
"301762040xxxx",
"30176204xxxxx",
"3017620xxxxxx",
"301762xxxxxxx",
"30176xxxxxxxx",
"3017xxxxxxxxx",
"301xxxxxxxxxx",
"30xxxxxxxxxxx",
"3xxxxxxxxxxxx"
],
"origins_tags": [],
"product_name": "Nutella",
"ingredients_that_may_be_from_palm_oil_tags": [],
"traces_debug_tags": [],
"nutrition_score_warning_no_fiber": 1,
"states": "en:to-be-checked, en:complete, en:nutrition-facts-completed, en:ingredients-completed, en:expiration-date-to-be-completed, en:packaging-code-to-be-completed, en:characteristics-completed, en:categories-completed, en:brands-completed, en:packaging-completed, en:quantity-completed, en:product-name-completed, en:photos-validated, en:photos-uploaded",
"manufacturing_places": "",
"ingredients_tags": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:milk-powder",
"en:whey-powder",
"en:whey",
"en:milk",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"ingredients_text_with_allergens_en": "Sugar, Palm Oil, <span class=\"allergen\">Hazelnuts</span> (13%), Fat-Reduced Cocoa (7.4%), Skimmed Milk Powder (6.6%), Whey Powder (<span class=\"allergen\">Milk</span>), Emulsifier: Lecithin (<span class=\"allergen\">Soya</span>), Vanillin",
"max_imgid": "41",
"emb_codes_debug_tags": [],
"rev": 76,
"editors": [
"",
"tacinte",
"teolemon",
"nicolasleger",
"agamitsudo",
"domdom26",
"remilk",
"tacite",
"scanbot"
],
"allergens": "noisettes, lait, lactosérum, soja",
"image_nutrition_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/nutrition_fr.16.400.jpg",
"ingredients_text_fr": "sucre, huile de palme, _noisettes_ (13%), cacao maigre (7,4%), _lait_ écrémé en poudre (6.6%), _lactosérum_ en poudre, émulsifiants : lécithines (_soja_), vanilline",
"categories_lc": "fr",
"complete": 1,
"id": "3017620401473",
"languages_hierarchy": [
"en:german",
"en:french",
"en:english"
],
"lang_debug_tags": [],
"countries": "Belgique, France, Italie, Maroc, Suisse, Royaume-Uni",
"emb_codes_20141016": "",
"unknown_nutrients_tags": [],
"additives_prev_tags": [
"en:e322"
],
"images": {
"1": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2448,
"h": 3264
}
},
"uploaded_t": 1366917287,
"uploader": "openfoodfacts-contributors"
},
"2": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 2666,
"w": 2000
}
},
"uploaded_t": 1425029569,
"uploader": "domdom26"
},
"3": {
"uploaded_t": 1425029591,
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 2000,
"h": 2666
}
},
"uploader": "domdom26"
},
"4": {
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2000,
"h": 2666
}
},
"uploaded_t": 1425029618,
"uploader": "domdom26"
},
"5": {
"uploader": "domdom26",
"uploaded_t": 1425029646,
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2000,
"h": 2666
}
}
},
"6": {
"uploader": "tacinte",
"sizes": {
"100": {
"h": 100,
"w": 74
},
"400": {
"w": 297,
"h": 400
},
"full": {
"w": 2000,
"h": 2697
}
},
"uploaded_t": 1426277833
},
"7": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 2000,
"h": 3554
}
},
"uploaded_t": 1426960930,
"uploader": "openfoodfacts-contributors"
},
"10": {
"uploaded_t": "1453476242",
"sizes": {
"100": {
"w": 61,
"h": 100
},
"400": {
"w": 245,
"h": 400
},
"full": {
"h": 400,
"w": 245
}
},
"uploader": "date-limite-app"
},
"11": {
"uploaded_t": "1453590293",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 2000,
"w": 1125
}
},
"uploader": "openfoodfacts-contributors"
},
"12": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 1125,
"h": 2000
}
},
"uploaded_t": "1454268244"
},
"13": {
"uploader": "date-limite-app",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 1333,
"w": 1000
}
},
"uploaded_t": "1455977784"
},
"14": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1459120652",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 2000,
"w": 1125
}
}
},
"15": {
"uploaded_t": "1459728989",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 1124,
"h": 2000
}
},
"uploader": "openfoodfacts-contributors"
},
"16": {
"uploader": "xmiky974x",
"uploaded_t": "1460578276",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 2448,
"h": 3264
}
}
},
"17": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 3264,
"w": 2448
}
},
"uploaded_t": "1484049023",
"uploader": "openfoodfacts-contributors"
},
"18": {
"uploaded_t": 1486510847,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploader": "openfood-ch-import"
},
"19": {
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploaded_t": 1486510848,
"uploader": "openfood-ch-import"
},
"20": {
"uploaded_t": 1486510848,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploader": "openfood-ch-import"
},
"21": {
"uploaded_t": 1486510848,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploader": "openfood-ch-import"
},
"22": {
"uploaded_t": 1486510848,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 1000,
"w": 563
}
},
"uploader": "openfood-ch-import"
},
"23": {
"uploader": "openfood-ch-import",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploaded_t": 1486510849
},
"24": {
"uploader": "openfood-ch-import",
"uploaded_t": 1486510849,
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 750,
"h": 1000
}
}
},
"25": {
"uploaded_t": 1486510849,
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 1000,
"w": 563
}
},
"uploader": "openfood-ch-import"
},
"26": {
"uploader": "openfood-ch-import",
"uploaded_t": 1486510849,
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 563,
"h": 1000
}
}
},
"27": {
"uploaded_t": 1486510849,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploader": "openfood-ch-import"
},
"28": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploaded_t": 1486510850,
"uploader": "openfood-ch-import"
},
"29": {
"uploader": "openfood-ch-import",
"uploaded_t": 1486510850,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 1000,
"w": 563
}
}
},
"30": {
"uploader": "openfood-ch-import",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 563,
"h": 1000
}
},
"uploaded_t": 1486510850
},
"31": {
"uploader": "openfood-ch-import",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploaded_t": 1486510850
},
"32": {
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 1000,
"w": 563
}
},
"uploaded_t": 1486510851,
"uploader": "openfood-ch-import"
},
"33": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 1000,
"w": 563
}
},
"uploaded_t": 1486510851,
"uploader": "openfood-ch-import"
},
"34": {
"uploader": "openfood-ch-import",
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploaded_t": 1486510851
},
"35": {
"uploaded_t": 1486510852,
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 1000,
"w": 563
}
},
"uploader": "openfood-ch-import"
},
"36": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 1000,
"w": 563
}
},
"uploaded_t": 1486510852,
"uploader": "openfood-ch-import"
},
"37": {
"uploader": "openfood-ch-import",
"uploaded_t": 1486510853,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
}
},
"40": {
"uploaded_t": "1474048156",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 3024,
"h": 4032
}
},
"uploader": "openfoodfacts-contributors"
},
"41": {
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 4032,
"w": 3024
}
},
"uploaded_t": "1474048177",
"uploader": "openfoodfacts-contributors"
},
"nutrition": {
"rev": "16",
"normalize": 'null',
"imgid": "3",
"white_magic": 'null',
"geometry": "1246x1600-406-173",
"sizes": {
"100": {
"h": 100,
"w": 78
},
"200": {
"h": 200,
"w": 156
},
"400": {
"h": 400,
"w": 312
},
"full": {
"h": 1600,
"w": 1246
}
}
},
"nutrition_fr": {
"normalize": 'null',
"rev": "16",
"imgid": "3",
"white_magic": 'null',
"geometry": "1246x1600-406-173",
"sizes": {
"100": {
"w": 78,
"h": 100
},
"200": {
"w": 156,
"h": 200
},
"400": {
"h": 400,
"w": 312
},
"full": {
"w": 1246,
"h": 1600
}
}
},
"front": {
"white_magic": 'null',
"sizes": {
"100": {
"h": 100,
"w": 61
},
"200": {
"h": 200,
"w": 123
},
"400": {
"w": 245,
"h": 400
},
"full": {
"h": 2653,
"w": 1627
}
},
"geometry": "1627x2653-186-13",
"normalize": 'null',
"rev": "20",
"imgid": "2"
},
"front_fr": {
"white_magic": 'null',
"geometry": "1627x2653-186-13",
"sizes": {
"100": {
"w": "61",
"h": "100"
},
"200": {
"h": 200,
"w": 123
},
"400": {
"h": 400,
"w": 245
},
"full": {
"w": 1627,
"h": 2653
}
},
"rev": "20",
"normalize": 'null',
"imgid": "2"
},
"ingredients": {
"white_magic": 'null',
"sizes": {
"100": {
"h": 61,
"w": 100
},
"200": {
"h": 122,
"w": 200
},
"400": {
"h": 244,
"w": 400
},
"full": {
"w": 1473,
"h": 900
}
},
"geometry": "1473x900-499-593",
"normalize": 'null',
"rev": "15",
"imgid": "4"
},
"ingredients_fr": {
"imgid": "4",
"rev": "15",
"normalize": 'null',
"sizes": {
"100": {
"h": 61,
"w": 100
},
"200": {
"h": 122,
"w": 200
},
"400": {
"w": 400,
"h": 244
},
"full": {
"w": 1473,
"h": 900
}
},
"geometry": "1473x900-499-593",
"white_magic": 'null'
}
},
"image_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.100.jpg",
"no_nutrition_data": "",
"manufacturing_places_debug_tags": [],
"product_name_en": "",
"last_image_t": 1539636781,
"serving_quantity": 15,
"origins_debug_tags": [],
"additives_prev_n": 1,
"allergens_from_ingredients": "noisettes, lait, lactosérum, soja",
"photographers_tags": [
"openfoodfacts-contributors",
"domdom26",
"tacinte",
"date-limite-app",
"xmiky974x",
"openfood-ch-import"
],
"expiration_date": "",
"image_ingredients_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/ingredients_fr.15.100.jpg",
"manufacturing_places_tags": [],
"entry_dates_tags": [
"2013-04-25",
"2013-04",
"2013"
],
"image_front_small_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.200.jpg",
"lc": "fr",
"last_modified_by": "sebleouf",
"product_name_fr_debug_tags": [],
"nutrition_grade_fr": "e",
"nutrition_score_debug": " -- energy 6 + sat-fat 10 + fr-sat-fat-for-fats 5 + sugars 10 + sodium 0 - fruits 0% 0 - fiber 0 - proteins 3 -- fsa 26 -- fr 26",
"labels_prev_hierarchy": [],
"quantity_debug_tags": [],
"ingredients_text_de": "Zucker, Palmöl, Haselnüsse (13%), fettarmes Kakao (7.4%), Magermilchpulver (6.6%), Süssmolkenpulver, Emulgator: Lecithine (Soja), Vanillin",
"pnns_groups_2_tags": [
"sweets"
],
"nutrition_grades": "e",
"nova_groups_tags": [
"en:4-ultra-processed-food-and-drink-products"
],
"_keywords": [
"et",
"noisette",
"produit",
"sucre",
"au",
"nutella",
"cacao",
"aux",
"pate",
"chocolat",
"petit-dejeuner",
"tartiner",
"ferrero"
],
"amino_acids_prev_tags": [],
"countries_lc": "fr",
"interface_version_modified": "20150316.jqm2",
"additives_tags": [
"en:e322"
],
"traces_from_ingredients": "",
"last_editor": "sebleouf",
"stores_debug_tags": [],
"nutrition_grades_tags": [
"e"
],
"amino_acids_tags": [],
"additives_prev_original_tags": [
"en:e322"
],
"last_modified_t": 1539636834,
"ingredients_text_en_debug_tags": [],
"ingredients": [
{
"id": "en:sugar",
"rank": 1,
"text": "sucre"
},
{
"text": "huile de palme",
"id": "en:palm-oil",
"rank": 2
},
{
"rank": 3,
"id": "en:hazelnut",
"percent": "13",
"text": "_noisettes_"
},
{
"percent": "7.4",
"text": "cacao maigre",
"rank": 4,
"id": "fr:cacao-maigre"
},
{
"rank": 5,
"id": "en:pasteurized-skim-milk",
"percent": "6.6",
"text": "_lait_ écrémé en poudre"
},
{
"id": "en:whey-powder",
"rank": 6,
"text": "_lactosérum_ en poudre"
},
{
"text": "émulsifiants",
"rank": 7,
"id": "en:emulsifier"
},
{
"id": "fr:vanilline",
"rank": 8,
"text": "vanilline"
},
{
"id": "en:lecithins",
"text": "lécithines"
},
{
"id": "en:soya",
"text": "_soja_"
}
],
"ingredients_text_de_debug_tags": [],
"image_small_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.200.jpg",
"stores": "",
"last_image_dates_tags": [
"2018-10-15",
"2018-10",
"2018"
],
"nova_group_debug": " -- ingredients/en:sugar : 3 -- ingredients/en:whey : 4",
"ingredients_n_tags": [
"10",
"1-10"
],
"product_name_fr": "Nutella",
"checkers_tags": [],
"code": "3017620401473",
"categories": "Petit-déjeuners, Produits à tartiner, Produits à tartiner sucrés, Pâtes à tartiner, Pâtes à tartiner au chocolat, Pâtes à tartiner aux noisettes, Pâtes à tartiner aux noisettes et au cacao",
"ingredients_text_with_allergens": "sucre, huile de palme, <span class=\"allergen\">noisettes</span> (13%), cacao maigre (7,4%), <span class=\"allergen\">lait</span> écrémé en poudre (6.6%), <span class=\"allergen\">lactosérum</span> en poudre, émulsifiants : lécithines (<span class=\"allergen\">soja</span>), vanilline",
"pnns_groups_1": "Sugary snacks",
"categories_debug_tags": [],
"pnns_groups_2": "Sweets",
"additives_debug_tags": [],
"countries_tags": [
"en:belgium",
"en:france",
"en:italy",
"en:morocco",
"en:switzerland",
"en:united-kingdom"
],
"image_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/front_fr.20.400.jpg",
"nutrition_data_per": "100g",
"informers_tags": [
"openfoodfacts-contributors",
"remilk",
"agamitsudo",
"teolemon",
"domdom26",
"tacite",
"openfood-ch-import"
],
"generic_name_en_debug_tags": [],
"purchase_places_debug_tags": [],
"url": "https://fr.openfoodfacts.org/produit/3017620401473/nutella-ferrero",
"lang": "fr",
"pnns_groups_1_tags": [
"sugary-snacks"
],
"serving_size_debug_tags": [],
"countries_beforescanbot": "Belgique,France,Suisse",
"labels_debug_tags": [],
"product_name_debug_tags": [],
"quality_tags": [
"ingredients-de-4-consonants",
"quantity-not-recognized"
],
"languages_codes": {
"fr": 6,
"de": 1,
"en": 1
},
"interface_version_created": "20120622",
"creator": "openfoodfacts-contributors",
"ingredients_text_with_allergens_de": "Zucker, Palmöl, <span class=\"allergen\">Haselnüsse</span> (13%), fettarmes Kakao (7.4%), <span class=\"allergen\">Magermilchpulver</span> (6.6%), <span class=\"allergen\">Süssmolkenpulver</span>, Emulgator: Lecithine (<span class=\"allergen\">Soja</span>), Vanillin",
"ingredients_debug": [
"sucre",
",",
'null',
'null',
'null',
" huile de palme",
",",
'null',
'null',
'null',
" _noisettes_ ",
"(",
"(",
'null',
'null',
"13%)",
",",
'null',
'null',
'null',
" cacao maigre ",
"(",
"(",
'null',
'null',
"7",
",",
'null',
'null',
'null',
"4%)",
",",
'null',
'null',
'null',
" _lait_ écrémé en poudre ",
"(",
"(",
'null',
'null',
"6.6%)",
",",
'null',
'null',
'null',
" _lactosérum_ en poudre",
",",
'null',
'null',
'null',
" émulsifiants ",
":",
":",
'null',
'null',
" lécithines ",
"(",
"(",
'null',
'null',
"_soja_)",
",",
'null',
'null',
'null',
" vanilline"
],
"countries_debug_tags": [],
"nutrition_score_warning_no_fruits_vegetables_nuts": 1,
"ingredients_text_en": "Sugar, Palm Oil, Hazelnuts (13%), Fat-Reduced Cocoa (7.4%), Skimmed Milk Powder (6.6%), Whey Powder (Milk), Emulsifier: Lecithin (Soya), Vanillin",
"ingredients_n": "10",
"image_ingredients_small_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/ingredients_fr.15.200.jpg",
"product_name_de_debug_tags": [],
"expiration_date_debug_tags": [],
"packaging": "Bocal,Verre,bocal,pot",
"nutriments": {
"sugars_serving": 8.52,
"carbohydrates_serving": 8.64,
"sodium_100g": 0.0393700787401575,
"salt_serving": 0.015,
"proteins_serving": 0.9,
"nutrition-score-uk_100g": 26,
"carbohydrates_100g": 57.6,
"nova-group_serving": "4",
"carbohydrates": 57.6,
"salt": 0.1,
"energy_serving": "341",
"energy_100g": "2276",
"salt_value": "0.1",
"sugars_value": "56.8",
"salt_100g": 0.1,
"saturated-fat_serving": 1.65,
"sugars": 56.8,
"energy": "2276",
"sodium_serving": 0.00591,
"nutrition-score-fr": 26,
"saturated-fat_100g": "11",
"nutrition-score-uk": 26,
"proteins_unit": "",
"proteins_100g": "6",
"saturated-fat_value": "11",
"fat_value": "31.6",
"saturated-fat": "11",
"fat_unit": "",
"nova-group_100g": "4",
"nova-group": "4",
"fat_serving": 4.74,
"fat": 31.6,
"fat_100g": 31.6,
"sugars_unit": "",
"salt_unit": "",
"carbohydrates_unit": "",
"proteins": "6",
"carbohydrates_value": "57.6",
"energy_unit": "kcal",
"proteins_value": "6",
"energy_value": "544",
"saturated-fat_unit": "",
"nutrition-score-fr_100g": 26,
"sugars_100g": 56.8,
"sodium": 0.0393700787401575
},
"unknown_ingredients_n": 0,
"created_t": 1366917286,
"minerals_prev_tags": [],
"ingredients_from_palm_oil_tags": [
"huile-de-palme"
],
"fruits-vegetables-nuts_100g_estimate": 0,
"ingredients_text": "sucre, huile de palme, _noisettes_ (13%), cacao maigre (7,4%), _lait_ écrémé en poudre (6.6%), _lactosérum_ en poudre, émulsifiants : lécithines (_soja_), vanilline",
"traces_hierarchy": [],
"nutrition_score_beverage": 0,
"correctors_tags": [
"teolemon",
"scanbot",
"nicolasleger",
"domdom26",
"agamitsudo",
"tacite",
"date-limite-app",
"zeldama",
"kiliweb",
"yukafix"
],
"nutrient_levels": {
"sugars": "high",
"saturated-fat": "high",
"fat": "high",
"salt": "low"
},
"purchase_places_tags": [
"france"
],
"image_ingredients_url": "https://static.openfoodfacts.org/images/products/301/762/040/1473/ingredients_fr.15.400.jpg",
"new_additives_n": 1,
"ingredients_original_tags": [
"en:sugar",
"en:palm-oil",
"en:hazelnut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:whey-powder",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"ingredients_text_fr_debug_tags": [],
"product_name_it": "",
"purchase_places": "France",
"categories_prev_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"generic_name_de_debug_tags": [],
"traces_tags": [],
"misc_tags": [
"en:nutrition-no-fiber",
"en:nutrition-no-fruits-vegetables-nuts",
"en:nutrition-no-fiber-or-fruits-vegetables-nuts",
"en:nutriscore-computed"
],
"additives_n": 1,
"update_key": "pnns12",
"allergens_hierarchy": [
"en:milk",
"en:nuts",
"en:soybeans"
]
},
{
"minerals_prev_tags": [],
"ingredients_from_palm_oil_tags": [
"huile-de-palme"
],
"packaging": "Pot,Verre,Couvercle,Plastique",
"expiration_date_debug_tags": [],
"nutriments": {
"saturated-fat_100g": 10.6,
"nutrition-score-fr": 26,
"proteins_100g": 6.3,
"proteins_unit": "",
"nutrition-score-uk": 26,
"salt_value": "0.107",
"energy_100g": "2255",
"energy_serving": "338",
"energy": "2255",
"sodium_serving": 0.00632,
"sugars": 56.3,
"saturated-fat_serving": 1.59,
"salt_100g": 0.107,
"sugars_value": "56.3",
"nova-group_serving": "4",
"carbohydrates": 57.5,
"salt": 0.107,
"carbohydrates_serving": 8.62,
"sugars_serving": 8.44,
"proteins_serving": 0.945,
"salt_serving": 0.016,
"nutrition-score-uk_100g": 26,
"carbohydrates_100g": 57.5,
"sodium_100g": 0.0421259842519685,
"saturated-fat_unit": "",
"energy_value": "539",
"sodium": 0.0421259842519685,
"sugars_100g": 56.3,
"nutrition-score-fr_100g": 26,
"salt_unit": "",
"fat_100g": 30.9,
"sugars_unit": "",
"proteins_value": "6.3",
"energy_unit": "kcal",
"carbohydrates_value": "57.5",
"proteins": 6.3,
"carbohydrates_unit": "",
"nova-group": "4",
"nova-group_100g": "4",
"fat_unit": "",
"fat": 30.9,
"fat_serving": 4.63,
"saturated-fat_value": "10.6",
"saturated-fat": 10.6,
"fat_value": "30.9"
},
"unknown_ingredients_n": 0,
"created_t": 1371278309,
"traces_hierarchy": [],
"generic_name_el_debug_tags": [],
"ingredients_text": "Sucre, huile de palme, _noisettes_ 13%, cacao maigre 7.4%, _lait_ écrémé en poudre 6.6%, _lactosérum_ en poudre, émulsifiant (lécithine de _soja_), vanilline.",
"fruits-vegetables-nuts_100g_estimate": 0,
"ingredients_original_tags": [
"en:sugar",
"en:palm-oil",
"en:hazelnut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:whey-powder",
"en:emulsifier",
"en:soya-lecithin",
"fr:vanilline"
],
"ingredients_text_fr_debug_tags": [],
"purchase_places": "Marseille,france,Greece",
"product_name_xx": "",
"purchase_places_tags": [
"marseille",
"france",
"greece"
],
"image_ingredients_url": "https://static.openfoodfacts.org/images/products/59032823/ingredients_fr.36.400.jpg",
"nutrition_score_beverage": 0,
"correctors_tags": [
"agamitsudo",
"teolemon",
"nicolasleger",
"segundo",
"scanbot",
"gkaklas",
"date-limite-app",
"lmrp",
"kiliweb",
"solveig-yuka",
"tacite",
"yukafix",
"openfoodfacts-contributors"
],
"nutrient_levels": {
"salt": "low",
"fat": "high",
"saturated-fat": "high",
"sugars": "high"
},
"new_additives_n": 0,
"update_key": "pnns12",
"allergens_hierarchy": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"categories_prev_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"additives_n": 1,
"misc_tags": [
"en:nutrition-no-fiber",
"en:nutrition-no-fruits-vegetables-nuts",
"en:nutrition-no-fiber-or-fruits-vegetables-nuts",
"en:nutriscore-computed"
],
"traces_tags": [],
"generic_name_de_debug_tags": [],
"purchase_places_debug_tags": [],
"lang": "fr",
"url": "https://fr.openfoodfacts.org/produit/59032823/nutella",
"informers_tags": [
"chandon",
"openfoodfacts-contributors",
"gkaklas",
"date-limite-app",
"openfood-ch-import",
"kiliweb",
"tacite"
],
"generic_name_en_debug_tags": [],
"serving_size_debug_tags": [],
"countries_beforescanbot": "France",
"pnns_groups_1_tags": [
"sugary-snacks"
],
"product_name_el_debug_tags": [],
"quality_tags": [
"ingredients-de-4-consonants"
],
"languages_codes": {
"en": 1,
"de": 3,
"fr": 6,
"el": 3
},
"labels_debug_tags": [],
"product_name_debug_tags": [],
"nutrition_data_prepared_per": "100g",
"ingredients_text_en": "",
"product_name_de_debug_tags": [],
"image_ingredients_small_url": "https://static.openfoodfacts.org/images/products/59032823/ingredients_fr.36.200.jpg",
"generic_name_el": "Κρέμα φουντουκιού και κακάο",
"ingredients_n": "9",
"interface_version_created": "20120622",
"creator": "chandon",
"countries_debug_tags": [],
"nutrition_score_warning_no_fruits_vegetables_nuts": 1,
"ingredients_text_with_allergens_de": "Zucker, Palmöl, <span class=\"allergen\">Haselnüsse</span> (13 %), fettarmer Kakao (7,4 %), <span class=\"allergen\">Magermilchpulver</span> (6,6 %), <span class=\"allergen\">Süssmolkenpulver</span>, Emulgator: Lecithine (<span class=\"allergen\">Soja</span>), Vanillin.",
"ingredients_debug": [
"Sucre",
",",
'null',
'null',
'null',
" huile de palme",
",",
'null',
'null',
'null',
" _noisettes_ 13%",
",",
'null',
'null',
'null',
" cacao maigre 7.4%",
",",
'null',
'null',
'null',
" _lait_ écrémé en poudre 6.6%",
",",
'null',
'null',
'null',
" _lactosérum_ en poudre",
",",
'null',
'null',
'null',
" émulsifiant ",
":",
":",
'null',
'null',
" ",
"(",
"(",
'null',
'null',
"lécithine de _soja_)",
",",
'null',
'null',
'null',
" vanilline."
],
"amino_acids_tags": [],
"additives_tags": [
"en:e322",
"en:e322i"
],
"traces_from_ingredients": "",
"last_editor": "kiliweb",
"nutrition_grades_tags": [
"e"
],
"stores_debug_tags": [],
"ingredients_text_de_debug_tags": [],
"product_name_el": "Nutella",
"ingredients": [
{
"id": "en:sugar",
"rank": 1,
"text": "Sucre"
},
{
"text": "huile de palme",
"id": "en:palm-oil",
"rank": 2
},
{
"text": "_noisettes_",
"percent": "13",
"id": "en:hazelnut",
"rank": 3
},
{
"id": "fr:cacao-maigre",
"rank": 4,
"text": "cacao maigre",
"percent": "7.4"
},
{
"rank": 5,
"id": "en:pasteurized-skim-milk",
"percent": "6.6",
"text": "_lait_ écrémé en poudre"
},
{
"text": "_lactosérum_ en poudre",
"rank": 6,
"id": "en:whey-powder"
},
{
"text": "émulsifiant",
"rank": 7,
"id": "en:emulsifier"
},
{
"id": "en:soya-lecithin",
"rank": 8,
"text": "lécithine de _soja_"
},
{
"text": "vanilline",
"id": "fr:vanilline",
"rank": 9
}
],
"image_small_url": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.200.jpg",
"stores": "Auchan,ΑΒ Βασιλόπουλος",
"additives_prev_original_tags": [
"en:e322"
],
"last_modified_t": 1534430924,
"ingredients_text_en_debug_tags": [],
"ingredients_text_el": "Ζάχαρη, φοινικέλαιο, _φουντούκια_ 13%, κακάο αποβουτυρωμένο 7.4%, _γάλα_ αποβουτυρωμένο σε σκόνη 6.6%, ορός _γάλακτος_ σε σκόνη, γαλακτοματοποιητής (λεκιθίνες _σόγιας_), βανιλλίνη",
"categories": "Petit-déjeuners, Produits à tartiner, Produits à tartiner sucrés, Pâtes à tartiner, Pâtes à tartiner au chocolat, Pâtes à tartiner aux noisettes, Pâtes à tartiner aux noisettes et au cacao",
"code": "59032823",
"checkers_tags": [],
"pnns_groups_2": "Sweets",
"additives_debug_tags": [
"en-e322i-added"
],
"ingredients_text_with_allergens": "Sucre, huile de palme, <span class=\"allergen\">noisettes</span> 13%, cacao maigre 7.4%, <span class=\"allergen\">lait</span> écrémé en poudre 6.6%, <span class=\"allergen\">lactosérum</span> en poudre, émulsifiant (lécithine de <span class=\"allergen\">soja</span>), vanilline.",
"categories_debug_tags": [],
"pnns_groups_1": "Sugary snacks",
"last_image_dates_tags": [
"2018-07-03",
"2018-07",
"2018"
],
"nova_group_debug": " -- ingredients/en:sugar : 3 -- ingredients/en:whey : 4",
"product_name_fr": "Nutella",
"ingredients_n_tags": [
"9",
"1-10"
],
"image_url": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.400.jpg",
"nutrition_data_per": "100g",
"countries_tags": [
"en:belgium",
"en:france",
"en:greece",
"en:italy",
"en:spain",
"en:switzerland",
"en:united-kingdom"
],
"generic_name_xx": "",
"image_ingredients_thumb_url": "https://static.openfoodfacts.org/images/products/59032823/ingredients_fr.36.100.jpg",
"manufacturing_places_tags": [],
"origins_debug_tags": [],
"additives_prev_n": 1,
"serving_quantity": 15,
"last_image_t": 1530638198,
"product_name_en": "Nutella",
"expiration_date": "2017-10-10",
"photographers_tags": [
"chandon",
"openfoodfacts-contributors",
"openfood-ch-import",
"kiliweb"
],
"allergens_from_ingredients": "noisettes, lait, lactosérum, soja",
"image_front_small_url": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.200.jpg",
"lc": "fr",
"ingredients_text_xx_debug_tags": [],
"entry_dates_tags": [
"2013-06-15",
"2013-06",
"2013"
],
"last_modified_by": "kiliweb",
"nutrition_score_debug": " -- energy 6 + sat-fat 10 + fr-sat-fat-for-fats 5 + sugars 10 + sodium 0 - fruits 0% 0 - fiber 0 - proteins 3 -- fsa 26 -- fr 26",
"labels_prev_hierarchy": [],
"quantity_debug_tags": [],
"product_name_fr_debug_tags": [],
"nutrition_grade_fr": "e",
"amino_acids_prev_tags": [],
"ingredients_text_el_debug_tags": [],
"interface_version_modified": "20150316.jqm2",
"countries_lc": "fr",
"pnns_groups_2_tags": [
"sweets"
],
"ingredients_text_de": "Zucker, Palmöl, Haselnüsse (13 %), fettarmer Kakao (7,4 %), Magermilchpulver (6,6 %), Süssmolkenpulver, Emulgator: Lecithine (Soja), Vanillin.",
"nova_groups_tags": [
"en:4-ultra-processed-food-and-drink-products"
],
"_keywords": [
"noisette",
"sucre",
"et",
"produit",
"pate",
"au",
"cacao",
"tartiner",
"aux",
"nutella",
"petit-dejeuner",
"chocolat"
],
"nutrition_grades": "e",
"max_imgid": "15",
"emb_codes_debug_tags": [],
"ingredients_text_with_allergens_en": "",
"image_nutrition_url": "https://static.openfoodfacts.org/images/products/59032823/nutrition_fr.37.400.jpg",
"allergens": "noisettes, lait, lactosérum, soja, noisettes, lait, lactosérum, soja",
"categories_lc": "fr",
"id": "59032823",
"complete": 1,
"ingredients_text_fr": "Sucre, huile de palme, _noisettes_ 13%, cacao maigre 7.4%, _lait_ écrémé en poudre 6.6%, _lactosérum_ en poudre, émulsifiant (lécithine de _soja_), vanilline.",
"rev": 49,
"editors": [
"",
"agamitsudo",
"segundo",
"teolemon",
"nicolasleger",
"chandon"
],
"lang_debug_tags": [],
"languages_hierarchy": [
"en:french",
"en:modern-greek",
"en:english",
"en:german"
],
"images": {
"1": {
"uploader": "chandon",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 299
},
"full": {
"w": 1936,
"h": 2592
}
},
"uploaded_t": 1371278310
},
"2": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1385842242,
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 2000,
"h": 3552
}
}
},
"3": {
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 4128,
"w": 2322
}
},
"uploaded_t": 1405853280,
"uploader": "openfoodfacts-contributors"
},
"4": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1417036425,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 2000,
"w": 1125
}
}
},
"5": {
"sizes": {
"100": {
"w": 100,
"h": 75
},
"400": {
"h": 300,
"w": 400
},
"full": {
"h": 1500,
"w": 2000
}
},
"uploaded_t": "1457633624",
"uploader": "openfoodfacts-contributors"
},
"6": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 3264,
"w": 2448
}
},
"uploaded_t": "1469203636",
"uploader": "openfoodfacts-contributors"
},
"7": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1479880888",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 3555,
"w": 2000
}
}
},
"8": {
"uploaded_t": 1486491512,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 1000,
"w": 563
}
},
"uploader": "openfood-ch-import"
},
"9": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 563,
"h": 1000
}
},
"uploaded_t": 1486491512,
"uploader": "openfood-ch-import"
},
"10": {
"uploaded_t": 1486491513,
"sizes": {
"100": {
"w": 56,
"h": 100
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 1000,
"w": 563
}
},
"uploader": "openfood-ch-import"
},
"11": {
"uploaded_t": 1486491513,
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 563,
"h": 1000
}
},
"uploader": "openfood-ch-import"
},
"12": {
"uploader": "openfood-ch-import",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 563,
"h": 1000
}
},
"uploaded_t": 1486491514
},
"13": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"w": 225,
"h": 400
},
"full": {
"h": 5984,
"w": 3366
}
},
"uploaded_t": "1495357416",
"uploader": "openfoodfacts-contributors"
},
"14": {
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 5984,
"w": 3366
}
},
"uploaded_t": "1495357463",
"uploader": "openfoodfacts-contributors"
},
"15": {
"sizes": {
"100": {
"w": 71,
"h": 100
},
"400": {
"w": 284,
"h": 400
},
"full": {
"h": 1200,
"w": 853
}
},
"uploaded_t": 1530638198,
"uploader": "kiliweb"
},
"front": {
"white_magic": "false",
"sizes": {
"100": {
"w": 72,
"h": 100
},
"200": {
"w": 143,
"h": 200
},
"400": {
"w": 286,
"h": 400
},
"full": {
"w": 1868,
"h": 2611
}
},
"geometry": "1868x2611-15-173",
"normalize": "true",
"rev": "13",
"imgid": "3"
},
"front_fr": {
"geometry": "0x0-0-0",
"imgid": "15",
"angle": 'null',
"normalize": "0",
"y1": 'null',
"sizes": {
"100": {
"w": "71",
"h": "100"
},
"200": {
"w": 142,
"h": 200
},
"400": {
"w": 284,
"h": 400
},
"full": {
"w": 853,
"h": 1200
}
},
"white_magic": "0",
"y2": 'null',
"x1": 'null',
"x2": 'null',
"rev": "48"
},
"nutrition_de": {
"y1": -1,
"angle": 0,
"normalize": 'null',
"imgid": "14",
"geometry": "0x0--14--14",
"rev": "35",
"x1": -1,
"x2": -1,
"y2": -1,
"white_magic": 'null',
"sizes": {
"100": {
"h": 100,
"w": 56
},
"200": {
"w": 113,
"h": 200
},
"400": {
"h": 400,
"w": 225
},
"full": {
"h": 5984,
"w": 3366
}
}
},
"nutrition_fr": {
"white_magic": "false",
"sizes": {
"100": {
"w": 82,
"h": 100
},
"200": {
"w": 164,
"h": 200
},
"400": {
"w": 328,
"h": 400
},
"full": {
"w": 355,
"h": 433
}
},
"rev": "37",
"x1": "25",
"x2": "167",
"y2": "268.4666748046875",
"geometry": "355x433-62-238",
"y1": "95.4666748046875",
"angle": "0",
"normalize": "false",
"imgid": "11"
},
"ingredients_de": {
"rev": "33",
"y2": -1,
"x1": -1,
"x2": -1,
"white_magic": 'null',
"sizes": {
"100": {
"w": 56,
"h": 100
},
"200": {
"w": 113,
"h": 200
},
"400": {
"w": 225,
"h": 400
},
"full": {
"w": 3366,
"h": 5984
}
},
"angle": 0,
"normalize": 'null',
"y1": -1,
"imgid": "13",
"geometry": "0x0--14--14"
},
"ingredients_fr": {
"geometry": "248x70-17-516",
"y1": "206.48333740234375",
"normalize": "false",
"angle": "0",
"imgid": "8",
"white_magic": "false",
"sizes": {
"100": {
"h": 28,
"w": 100
},
"200": {
"w": 200,
"h": 56
},
"400": {
"h": 70,
"w": 248
},
"full": {
"h": 70,
"w": 248
}
},
"rev": "36",
"x2": "106",
"x1": "7",
"y2": "234.48333740234375"
}
},
"image_thumb_url": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.100.jpg",
"manufacturing_places_debug_tags": [],
"no_nutrition_data": "",
"generic_name_xx_debug_tags": [],
"emb_codes_20141016": "",
"countries": "Belgique, France, Grèce, Italie, Espagne, Suisse, Royaume-Uni",
"unknown_nutrients_tags": [],
"additives_prev_tags": [
"en:e322"
],
"nova_groups": "4",
"completed_t": 1510168428,
"link": "http://www.ferrero.fr/nutella",
"generic_name": "Pâte à tartiner aux noisettes et au cacao",
"countries_hierarchy": [
"en:belgium",
"en:france",
"en:greece",
"en:italy",
"en:spain",
"en:switzerland",
"en:united-kingdom"
],
"packaging_tags": [
"pot",
"verre",
"couvercle",
"plastique"
],
"ingredients_ids_debug": [
"sucre",
"huile-de-palme",
"noisettes-13",
"cacao-maigre-7-4",
"lait-ecreme-en-poudre-6-6",
"lactoserum-en-poudre",
"emulsifiant",
"lecithine-de-soja",
"vanilline"
],
"nutrient_levels_tags": [
"en:fat-in-high-quantity",
"en:saturated-fat-in-high-quantity",
"en:sugars-in-high-quantity",
"en:salt-in-low-quantity"
],
"states_tags": [
"en:to-be-checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"brands": "Nutella",
"categories_prev_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"nova_group": "4",
"product_name": "Nutella",
"codes_tags": [
"code-8",
"59032823",
"5903282x",
"590328xx",
"59032xxx",
"5903xxxx",
"590xxxxx",
"59xxxxxx",
"5xxxxxxx"
],
"origins_tags": [],
"last_edit_dates_tags": [
"2018-08-16",
"2018-08",
"2018"
],
"emb_codes_tags": [],
"labels_hierarchy": [],
"states_hierarchy": [
"en:to-be-checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"nutrition_data_per_debug_tags": [],
"editors_tags": [
"chandon",
"openfood-ch-import",
"tacite",
"segundo",
"nicolasleger",
"teolemon",
"lmrp",
"date-limite-app",
"scanbot",
"yukafix",
"solveig-yuka",
"gkaklas",
"kiliweb",
"agamitsudo",
"openfoodfacts-contributors"
],
"sources": [
{
"fields": [
"ingredients_text_de"
],
"images": [
"8",
"9",
"10",
"11",
"12"
],
"id": "openfood-ch",
"import_t": 1486491514,
"url": "https://www.openfood.ch/en/products/1244"
}
],
"ingredients_tags": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:milk-powder",
"en:whey-powder",
"en:whey",
"en:milk",
"en:emulsifier",
"en:soya-lecithin",
"en:lecithins",
"fr:vanilline"
],
"nutrition_score_warning_no_fiber": 1,
"traces_debug_tags": [],
"ingredients_that_may_be_from_palm_oil_tags": [],
"manufacturing_places": "",
"states": "en:to-be-checked, en:complete, en:nutrition-facts-completed, en:ingredients-completed, en:expiration-date-completed, en:packaging-code-to-be-completed, en:characteristics-completed, en:categories-completed, en:brands-completed, en:packaging-completed, en:quantity-completed, en:product-name-completed, en:photos-validated, en:photos-uploaded",
"allergens_tags": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"_id": "59032823",
"emb_codes_orig": "",
"product_name_en_debug_tags": [],
"nucleotides_tags": [],
"ingredients_that_may_be_from_palm_oil_n": 0,
"ingredients_text_debug": "Sucre, huile de palme, _noisettes_ 13%, cacao maigre 7.4%, _lait_ écrémé en poudre 6.6%, _lactosérum_ en poudre, émulsifiant : (lécithine de _soja_), vanilline.",
"ingredients_text_xx": "",
"debug_param_sorted_langs": [
"xx",
"de",
"el",
"en",
"fr"
],
"additives_old_tags": [
"en:e1403",
"en:e322"
],
"stores_tags": [
"auchan",
"αβ-βασιλόπουλος"
],
"ingredients_from_palm_oil_n": 1,
"generic_name_en": "",
"languages_tags": [
"en:french",
"en:modern-greek",
"en:english",
"en:german",
"en:4",
"en:multilingual"
],
"packaging_debug_tags": [],
"sortkey": 1534430924,
"product_quantity": 630,
"brands_debug_tags": [],
"selected_images": {
"front": {
"small": {
"fr": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.200.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.100.jpg"
},
"display": {
"fr": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.400.jpg"
}
},
"ingredients": {
"small": {
"de": "https://static.openfoodfacts.org/images/products/59032823/ingredients_de.33.200.jpg",
"fr": "https://static.openfoodfacts.org/images/products/59032823/ingredients_fr.36.200.jpg"
},
"thumb": {
"de": "https://static.openfoodfacts.org/images/products/59032823/ingredients_de.33.100.jpg",
"fr": "https://static.openfoodfacts.org/images/products/59032823/ingredients_fr.36.100.jpg"
},
"display": {
"de": "https://static.openfoodfacts.org/images/products/59032823/ingredients_de.33.400.jpg",
"fr": "https://static.openfoodfacts.org/images/products/59032823/ingredients_fr.36.400.jpg"
}
},
"nutrition": {
"display": {
"fr": "https://static.openfoodfacts.org/images/products/59032823/nutrition_fr.37.400.jpg",
"de": "https://static.openfoodfacts.org/images/products/59032823/nutrition_de.35.400.jpg"
},
"small": {
"fr": "https://static.openfoodfacts.org/images/products/59032823/nutrition_fr.37.200.jpg",
"de": "https://static.openfoodfacts.org/images/products/59032823/nutrition_de.35.200.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/59032823/nutrition_fr.37.100.jpg",
"de": "https://static.openfoodfacts.org/images/products/59032823/nutrition_de.35.100.jpg"
}
}
},
"ingredients_from_or_that_may_be_from_palm_oil_n": 1,
"labels_lc": "fr",
"traces": "",
"product_name_xx_debug_tags": [],
"image_front_thumb_url": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.100.jpg",
"labels_prev_tags": [],
"image_front_url": "https://static.openfoodfacts.org/images/products/59032823/front_fr.48.400.jpg",
"vitamins_prev_tags": [],
"vitamins_tags": [],
"cities_tags": [],
"unique_scans_n": 352,
"categories_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:cocoa-and-hazelnuts-spreads",
"en:hazelnut-spreads"
],
"link_debug_tags": [],
"categories_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:cocoa-and-hazelnuts-spreads",
"en:hazelnut-spreads"
],
"generic_name_fr_debug_tags": [],
"ingredients_text_with_allergens_el": "Ζάχαρη, φοινικέλαιο, <span class=\"allergen\">φουντούκια</span> 13%, κακάο αποβουτυρωμένο 7.4%, <span class=\"allergen\">γάλα</span> αποβουτυρωμένο σε σκόνη 6.6%, ορός <span class=\"allergen\">γάλακτος</span> σε σκόνη, γαλακτοματοποιητής (λεκιθίνες <span class=\"allergen\">σόγιας</span>), βανιλλίνη",
"ingredients_text_debug_tags": [],
"labels": "",
"product_name_de": "",
"image_nutrition_small_url": "https://static.openfoodfacts.org/images/products/59032823/nutrition_fr.37.200.jpg",
"languages": {
"en:german": 3,
"en:french": 6,
"en:modern-greek": 3,
"en:english": 1
},
"scans_n": 399,
"labels_tags": [],
"additives_old_n": 2,
"origins": "",
"additives_original_tags": [
"en:e322i"
],
"ingredients_hierarchy": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:milk-powder",
"en:whey-powder",
"en:whey",
"en:milk",
"en:emulsifier",
"en:soya-lecithin",
"en:lecithins",
"fr:vanilline"
],
"serving_size": "15 g",
"nucleotides_prev_tags": [],
"minerals_tags": [],
"generic_name_fr": "Pâte à tartiner aux noisettes et au cacao",
"quantity": "630 g",
"emb_codes": "",
"generic_name_de": "",
"image_nutrition_thumb_url": "https://static.openfoodfacts.org/images/products/59032823/nutrition_fr.37.100.jpg",
"ingredients_text_with_allergens_fr": "Sucre, huile de palme, <span class=\"allergen\">noisettes</span> 13%, cacao maigre 7.4%, <span class=\"allergen\">lait</span> écrémé en poudre 6.6%, <span class=\"allergen\">lactosérum</span> en poudre, émulsifiant (lécithine de <span class=\"allergen\">soja</span>), vanilline.",
"brands_tags": [
"nutella"
]
},
{
"codes_tags": [
"code-13",
"3017620402135",
"301762040213x",
"30176204021xx",
"3017620402xxx",
"301762040xxxx",
"30176204xxxxx",
"3017620xxxxxx",
"301762xxxxxxx",
"30176xxxxxxxx",
"3017xxxxxxxxx",
"301xxxxxxxxxx",
"30xxxxxxxxxxx",
"3xxxxxxxxxxxx"
],
"origins_tags": [],
"product_name": "Nutella",
"nova_group": "4",
"editors_tags": [
"tacite",
"kiliweb",
"aldaris",
"date-limite-app",
"asmoth",
"zoneblockscommunity",
"tacinte",
"solveig-yuka",
"coriolys",
"openfoodfacts-contributors",
"julien27"
],
"nutrition_data_per_debug_tags": [],
"states_hierarchy": [
"en:to-be-checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-to-be-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"last_edit_dates_tags": [
"2018-09-14",
"2018-09",
"2018"
],
"emb_codes_tags": [],
"labels_hierarchy": [
"en:gluten-free",
"en:no-colorings",
"en:no-preservatives"
],
"ingredients_tags": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:milk-powder",
"en:whey-powder",
"en:whey",
"en:milk",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"manufacturing_places": "",
"states": "en:to-be-checked, en:complete, en:nutrition-facts-completed, en:ingredients-completed, en:expiration-date-to-be-completed, en:packaging-code-to-be-completed, en:characteristics-completed, en:categories-completed, en:brands-completed, en:packaging-completed, en:quantity-completed, en:product-name-completed, en:photos-validated, en:photos-uploaded",
"ingredients_that_may_be_from_palm_oil_tags": [],
"traces_debug_tags": [],
"generic_name": "",
"completed_t": 1460067180,
"link": "",
"nova_groups": "4",
"states_tags": [
"en:to-be-checked",
"en:complete",
"en:nutrition-facts-completed",
"en:ingredients-completed",
"en:expiration-date-to-be-completed",
"en:packaging-code-to-be-completed",
"en:characteristics-completed",
"en:categories-completed",
"en:brands-completed",
"en:packaging-completed",
"en:quantity-completed",
"en:product-name-completed",
"en:photos-validated",
"en:photos-uploaded"
],
"nutrient_levels_tags": [
"en:fat-in-high-quantity",
"en:saturated-fat-in-high-quantity",
"en:sugars-in-high-quantity",
"en:salt-in-low-quantity"
],
"countries_hierarchy": [
"en:france"
],
"ingredients_ids_debug": [
"sucre",
"huile-de-palme",
"noisettes-13",
"cacao-maigre-7",
"4",
"lait-ecreme-en-poudre-6",
"6",
"lactoserum-en-poudre",
"emulsifiants",
"lecithines",
"soja",
"vanilline"
],
"packaging_tags": [
"pot",
"verre"
],
"categories_prev_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"brands": "Ferrero,Nutella",
"lang_debug_tags": [],
"languages_hierarchy": [
"en:french"
],
"no_nutrition_data": "",
"manufacturing_places_debug_tags": [],
"image_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.100.jpg",
"images": {
"1": {
"uploaded_t": "1459954734",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 3024,
"h": 4032
}
},
"uploader": "tacite"
},
"2": {
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 3024,
"h": 4032
}
},
"uploaded_t": "1459954748",
"uploader": "tacite"
},
"3": {
"uploader": "tacite",
"uploaded_t": "1459954755",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 3024,
"h": 4032
}
}
},
"4": {
"uploader": "tacinte",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 2666,
"w": 2000
}
},
"uploaded_t": "1462302434"
},
"5": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2000,
"h": 2666
}
},
"uploaded_t": "1474100347"
},
"6": {
"uploaded_t": "1475213419",
"sizes": {
"100": {
"w": 100,
"h": 75
},
"400": {
"h": 299,
"w": 400
},
"full": {
"h": 1936,
"w": 2592
}
},
"uploader": "openfoodfacts-contributors"
},
"7": {
"uploaded_t": "1480276231",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 299,
"h": 400
},
"full": {
"h": 2592,
"w": 1936
}
},
"uploader": "openfoodfacts-contributors"
},
"8": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": "1485688002",
"sizes": {
"100": {
"w": 74,
"h": 100
},
"400": {
"h": 400,
"w": 297
},
"full": {
"w": 2000,
"h": 2693
}
}
},
"9": {
"uploader": "openfoodfacts-contributors",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 2000,
"h": 2666
}
},
"uploaded_t": "1490427041"
},
"10": {
"uploaded_t": "1492156523",
"sizes": {
"100": {
"h": 100,
"w": 56
},
"400": {
"h": 400,
"w": 225
},
"full": {
"w": 1125,
"h": 2000
}
},
"uploader": "openfoodfacts-contributors"
},
"11": {
"uploaded_t": "1495018452",
"sizes": {
"100": {
"w": 74,
"h": 100
},
"400": {
"w": 297,
"h": 400
},
"full": {
"w": 2000,
"h": 2697
}
},
"uploader": "openfoodfacts-contributors"
},
"12": {
"uploader": "kiliweb",
"uploaded_t": "1497779011",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 1360,
"w": 1021
}
}
},
"14": {
"uploader": "kiliweb",
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"h": 1360,
"w": 1021
}
},
"uploaded_t": "1499944389"
},
"15": {
"uploader": "kiliweb",
"uploaded_t": "1509098605",
"sizes": {
"100": {
"h": 100,
"w": 68
},
"400": {
"w": 273,
"h": 400
},
"full": {
"h": 1360,
"w": 929
}
}
},
"16": {
"sizes": {
"100": {
"w": 75,
"h": 100
},
"400": {
"h": 400,
"w": 300
},
"full": {
"w": 1021,
"h": 1360
}
},
"uploaded_t": "1515071999",
"uploader": "kiliweb"
},
"17": {
"uploader": "kiliweb",
"uploaded_t": "1515072003",
"sizes": {
"100": {
"h": 32,
"w": 100
},
"400": {
"w": 400,
"h": 128
},
"full": {
"h": 927,
"w": 2886
}
}
},
"18": {
"uploader": "openfoodfacts-contributors",
"uploaded_t": 1536902687,
"sizes": {
"100": {
"w": 100,
"h": 69
},
"400": {
"w": 400,
"h": 277
},
"full": {
"w": 3096,
"h": 2142
}
}
},
"ingredients_fr": {
"sizes": {
"100": {
"w": 100,
"h": 32
},
"200": {
"h": 64,
"w": 200
},
"400": {
"w": 400,
"h": 128
},
"full": {
"w": 2886,
"h": 927
}
},
"white_magic": "0",
"y2": 'null',
"x2": 'null',
"x1": 'null',
"rev": "59",
"geometry": "0x0-0-0",
"imgid": "17",
"normalize": "0",
"angle": 'null',
"y1": 'null'
},
"nutrition": {
"normalize": "false",
"rev": "8",
"imgid": "3",
"white_magic": "false",
"sizes": {
"100": {
"h": 71,
"w": 100
},
"200": {
"h": 141,
"w": 200
},
"400": {
"w": 400,
"h": 282
},
"full": {
"w": 1714,
"h": 1209
}
},
"geometry": "1714x1209-463-1772"
},
"front_fr": {
"geometry": "0x0-0-0",
"imgid": "16",
"y1": 'null',
"angle": 'null',
"normalize": "0",
"sizes": {
"100": {
"h": "100",
"w": "75"
},
"200": {
"h": 200,
"w": 150
},
"400": {
"w": 300,
"h": 400
},
"full": {
"w": 1021,
"h": 1360
}
},
"white_magic": "0",
"x1": 'null',
"x2": 'null',
"y2": 'null',
"rev": "57"
},
"front": {
"sizes": {
"100": {
"w": 59,
"h": 100
},
"200": {
"h": 200,
"w": 119
},
"400": {
"h": 400,
"w": 238
},
"full": {
"h": 3508,
"w": 2086
}
},
"geometry": "2086x3508-403-320",
"white_magic": "true",
"imgid": "1",
"normalize": "false",
"rev": "6"
},
"ingredients": {
"white_magic": "false",
"geometry": "958x1291-1108-1227",
"sizes": {
"100": {
"w": 74,
"h": 100
},
"200": {
"w": 148,
"h": 200
},
"400": {
"w": 297,
"h": 400
},
"full": {
"w": 958,
"h": 1291
}
},
"rev": "7",
"normalize": "false",
"imgid": "2"
},
"nutrition_fr": {
"geometry": "0x0-0-0",
"angle": 'null',
"normalize": "0",
"y1": 'null',
"imgid": "12",
"white_magic": "0",
"sizes": {
"100": {
"h": 100,
"w": 75
},
"200": {
"w": 150,
"h": 200
},
"400": {
"w": 300,
"h": 400
},
"full": {
"h": 1360,
"w": 1021
}
},
"rev": "32",
"y2": 'null',
"x1": 'null',
"x2": 'null'
}
},
"unknown_nutrients_tags": [],
"additives_prev_tags": [
"en:e322"
],
"countries": "France",
"emb_codes_20141016": "",
"emb_codes_debug_tags": [],
"max_imgid": "18",
"ingredients_text_fr": "Sucre, huile de palme, _NOISETTES_ 13%, cacao maigre 7,4%, _LAIT_ écrémé en poudre 6,6%, _LACTOSÉRUM_ en poudre, émulsifiants : lécithines [_SOJA_], vanilline.",
"id": "3017620402135",
"categories_lc": "fr",
"complete": 1,
"allergens": "NOISETTES, LAIT, LACTOSÉRUM, SOJA, NOISETTES, LAIT, LACTOSÉRUM, SOJA",
"image_nutrition_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/nutrition_fr.32.400.jpg",
"rev": 60,
"quantity": "1 kg",
"generic_name_fr": "",
"minerals_tags": [],
"emb_codes": "",
"brands_tags": [
"ferrero",
"nutella"
],
"ingredients_text_with_allergens_fr": "Sucre, huile de palme, <span class=\"allergen\"><span class=\"allergen\">NOISETTES</span></span> 13%, cacao maigre 7,4%, <span class=\"allergen\"><span class=\"allergen\">LAIT</span></span> écrémé en poudre 6,6%, <span class=\"allergen\"><span class=\"allergen\">LACTOSÉRUM</span></span> en poudre, émulsifiants : lécithines [<span class=\"allergen\"><span class=\"allergen\">SOJA</span></span>], vanilline.",
"image_nutrition_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/nutrition_fr.32.100.jpg",
"labels": "Sans colorants, Sans conservateurs, Sans gluten",
"labels_tags": [
"en:gluten-free",
"en:no-colorings",
"en:no-preservatives"
],
"languages": {
"en:french": 5
},
"scans_n": 413,
"image_nutrition_small_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/nutrition_fr.32.200.jpg",
"additives_original_tags": [
"en:e322"
],
"origins": "",
"additives_old_n": 1,
"nucleotides_prev_tags": [],
"serving_size": "15 g",
"ingredients_hierarchy": [
"en:sugar",
"en:palm-oil",
"en:vegetable-oil",
"en:oil-and-vegetable-fat",
"en:oil",
"en:hazelnut",
"en:nut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:milk-powder",
"en:whey-powder",
"en:whey",
"en:milk",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"image_front_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.100.jpg",
"traces": "",
"labels_lc": "fr",
"unique_scans_n": 350,
"vitamins_tags": [],
"cities_tags": [],
"vitamins_prev_tags": [],
"image_front_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.400.jpg",
"labels_prev_tags": [
"en:gluten-free",
"en:no-colorings",
"en:no-preservatives"
],
"categories_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:cocoa-and-hazelnuts-spreads",
"en:hazelnut-spreads"
],
"generic_name_fr_debug_tags": [],
"categories_hierarchy": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:cocoa-and-hazelnuts-spreads",
"en:hazelnut-spreads"
],
"link_debug_tags": [],
"emb_codes_orig": "",
"_id": "3017620402135",
"allergens_tags": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"debug_param_sorted_langs": [
"fr"
],
"additives_old_tags": [
"en:e322"
],
"ingredients_that_may_be_from_palm_oil_n": 0,
"ingredients_text_debug": "Sucre, huile de palme, _NOISETTES_ 13%, cacao maigre 7,4%, _LAIT_ écrémé en poudre 6,6%, _LACTOSÉRUM_ en poudre, émulsifiants : lécithines [_SOJA_], vanilline.",
"nucleotides_tags": [],
"languages_tags": [
"en:french",
"en:1"
],
"packaging_debug_tags": [],
"stores_tags": [
"netto",
"carrefour",
"cora",
"leclerc",
"intermarche"
],
"ingredients_from_palm_oil_n": 1,
"selected_images": {
"nutrition": {
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/nutrition_fr.32.100.jpg"
},
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/nutrition_fr.32.200.jpg"
},
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/nutrition_fr.32.400.jpg"
}
},
"front": {
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.400.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.100.jpg"
},
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.200.jpg"
}
},
"ingredients": {
"small": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/ingredients_fr.59.200.jpg"
},
"thumb": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/ingredients_fr.59.100.jpg"
},
"display": {
"fr": "https://static.openfoodfacts.org/images/products/301/762/040/2135/ingredients_fr.59.400.jpg"
}
}
},
"ingredients_from_or_that_may_be_from_palm_oil_n": 1,
"brands_debug_tags": [],
"sortkey": 1536902713,
"languages_codes": {
"fr": 5
},
"quality_tags": [
"quantity-not-recognized"
],
"labels_debug_tags": [],
"product_name_debug_tags": [],
"ingredients_n": "10",
"image_ingredients_small_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/ingredients_fr.59.200.jpg",
"ingredients_debug": [
"Sucre",
",",
'null',
'null',
'null',
" huile de palme",
",",
'null',
'null',
'null',
" _NOISETTES_ 13%",
",",
'null',
'null',
'null',
" cacao maigre 7",
",",
'null',
'null',
'null',
"4%",
",",
'null',
'null',
'null',
" _LAIT_ écrémé en poudre 6",
",",
'null',
'null',
'null',
"6%",
",",
'null',
'null',
'null',
" _LACTOSÉRUM_ en poudre",
",",
'null',
'null',
'null',
" émulsifiants ",
":",
":",
'null',
'null',
" lécithines ",
"[",
"[",
'null',
'null',
"_SOJA_]",
",",
'null',
'null',
'null',
" vanilline."
],
"nutrition_score_warning_no_fruits_vegetables_nuts": 1,
"countries_debug_tags": [],
"creator": "tacite",
"interface_version_created": "20120622",
"url": "https://fr.openfoodfacts.org/produit/3017620402135/nutella-ferrero",
"lang": "fr",
"purchase_places_debug_tags": [],
"informers_tags": [
"tacite",
"kiliweb"
],
"serving_size_debug_tags": [],
"pnns_groups_1_tags": [
"sugary-snacks"
],
"purchase_places": "France",
"ingredients_text_fr_debug_tags": [],
"ingredients_original_tags": [
"en:sugar",
"en:palm-oil",
"en:hazelnut",
"fr:cacao-maigre",
"en:pasteurized-skim-milk",
"en:whey-powder",
"en:emulsifier",
"fr:vanilline",
"en:lecithins",
"en:soya"
],
"correctors_tags": [
"tacite",
"asmoth",
"kiliweb",
"openfoodfacts-contributors",
"coriolys",
"aldaris",
"julien27"
],
"nutrition_score_beverage": 0,
"nutrient_levels": {
"fat": "high",
"salt": "low",
"saturated-fat": "high",
"sugars": "high"
},
"purchase_places_tags": [
"france"
],
"image_ingredients_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/ingredients_fr.59.400.jpg",
"allergens_hierarchy": [
"en:milk",
"en:nuts",
"en:soybeans"
],
"update_key": "pnns12",
"traces_tags": [],
"additives_n": 1,
"misc_tags": [
"en:nutrition-no-fruits-vegetables-nuts",
"en:nutrition-no-fiber-or-fruits-vegetables-nuts",
"en:nutriscore-computed"
],
"categories_prev_tags": [
"en:breakfasts",
"en:spreads",
"en:sweet-spreads",
"fr:pates-a-tartiner",
"en:chocolate-spreads",
"en:hazelnut-spreads",
"en:cocoa-and-hazelnuts-spreads"
],
"ingredients_from_palm_oil_tags": [
"huile-de-palme"
],
"minerals_prev_tags": [],
"created_t": 1459954734,
"unknown_ingredients_n": 0,
"packaging": "pot,verre",
"nutriments": {
"nova-group_100g": "4",
"nova-group": "4",
"fat_unit": "",
"fat": 31.6,
"fat_serving": 4.74,
"saturated-fat_value": "11",
"fat_value": "31.6",
"saturated-fat": "11",
"saturated-fat_unit": "",
"energy_value": "546",
"sugars_100g": 56.8,
"sodium": 0.0448818897637795,
"nutrition-score-fr_100g": 26,
"salt_unit": "",
"fat_100g": 31.6,
"sugars_unit": "",
"fiber_unit": "g",
"energy_unit": "kcal",
"proteins_value": "6",
"proteins": "6",
"carbohydrates_unit": "",
"carbohydrates_value": "57.6",
"fiber_value": "0",
"nova-group_serving": "4",
"carbohydrates": 57.6,
"salt": 0.114,
"carbohydrates_serving": 8.64,
"sugars_serving": 8.52,
"salt_serving": 0.0171,
"proteins_serving": 0.9,
"nutrition-score-uk_100g": 26,
"fiber_100g": "0",
"carbohydrates_100g": 57.6,
"sodium_100g": 0.0448818897637795,
"saturated-fat_100g": "11",
"nutrition-score-fr": 26,
"fiber_serving": "0",
"proteins_100g": "6",
"nutrition-score-uk": 26,
"proteins_unit": "",
"energy_100g": "2284",
"salt_value": "0.114",
"energy_serving": "343",
"fiber": "0",
"sodium_serving": 0.00673,
"energy": "2284",
"salt_100g": 0.114,
"sugars_value": "56.8",
"sugars": 56.8,
"saturated-fat_serving": 1.65
},
"expiration_date_debug_tags": [],
"traces_hierarchy": [],
"fruits-vegetables-nuts_100g_estimate": 0,
"ingredients_text": "Sucre, huile de palme, _NOISETTES_ 13%, cacao maigre 7,4%, _LAIT_ écrémé en poudre 6,6%, _LACTOSÉRUM_ en poudre, émulsifiants : lécithines [_SOJA_], vanilline.",
"quantity_debug_tags": [],
"nutrition_score_debug": " -- energy 6 + sat-fat 10 + fr-sat-fat-for-fats 5 + sugars 10 + sodium 0 - fruits 0% 0 - fiber 0 - proteins 3 -- fsa 26 -- fr 26",
"labels_prev_hierarchy": [
"en:gluten-free",
"en:no-colorings",
"en:no-preservatives"
],
"nutrition_grade_fr": "e",
"product_name_fr_debug_tags": [],
"countries_lc": "fr",
"interface_version_modified": "20150316.jqm2",
"amino_acids_prev_tags": [],
"nutrition_grades": "e",
"_keywords": [
"spread",
"breakfast",
"sweet-spread",
"hazelnut-spread",
"no-coloring",
"gluten-free",
"no-preservative",
"nutella",
"ferrero",
"pates-a-tartiner",
"chocolate-spread",
"cocoa-and-hazelnuts-spread"
],
"nova_groups_tags": [
"en:4-ultra-processed-food-and-drink-products"
],
"pnns_groups_2_tags": [
"sweets"
],
"manufacturing_places_tags": [],
"image_ingredients_thumb_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/ingredients_fr.59.100.jpg",
"allergens_from_ingredients": "NOISETTES, LAIT, LACTOSÉRUM, SOJA, NOISETTES, LAIT, LACTOSÉRUM, SOJA",
"photographers_tags": [
"tacite",
"tacinte",
"openfoodfacts-contributors",
"kiliweb"
],
"expiration_date": "",
"last_image_t": 1536902713,
"additives_prev_n": 1,
"serving_quantity": 15,
"origins_debug_tags": [],
"last_modified_by": 'null',
"entry_dates_tags": [
"2016-04-06",
"2016-04",
"2016"
],
"image_front_small_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.200.jpg",
"lc": "fr",
"categories_debug_tags": [],
"pnns_groups_1": "Sugary snacks",
"ingredients_text_with_allergens": "Sucre, huile de palme, <span class=\"allergen\"><span class=\"allergen\">NOISETTES</span></span> 13%, cacao maigre 7,4%, <span class=\"allergen\"><span class=\"allergen\">LAIT</span></span> écrémé en poudre 6,6%, <span class=\"allergen\"><span class=\"allergen\">LACTOSÉRUM</span></span> en poudre, émulsifiants : lécithines [<span class=\"allergen\"><span class=\"allergen\">SOJA</span></span>], vanilline.",
"additives_debug_tags": [],
"pnns_groups_2": "Sweets",
"checkers_tags": [],
"code": "3017620402135",
"categories": "Petit-déjeuners, Produits à tartiner, Produits à tartiner sucrés, Pâtes à tartiner, Pâtes à tartiner au chocolat, Pâtes à tartiner aux noisettes, Pâtes à tartiner aux noisettes et au cacao",
"ingredients_n_tags": [
"10",
"1-10"
],
"product_name_fr": "Nutella",
"nova_group_debug": " -- ingredients/en:sugar : 3 -- ingredients/en:whey : 4",
"last_image_dates_tags": [
"2018-09-14",
"2018-09",
"2018"
],
"nutrition_data_per": "100g",
"image_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.400.jpg",
"countries_tags": [
"en:france"
],
"amino_acids_tags": [],
"stores_debug_tags": [],
"nutrition_grades_tags": [
"e"
],
"additives_tags": [
"en:e322"
],
"last_editor": "openfoodfacts-contributors",
"traces_from_ingredients": "",
"stores": "Netto,Carrefour,Cora,Leclerc,Intermarché",
"image_small_url": "https://static.openfoodfacts.org/images/products/301/762/040/2135/front_fr.57.200.jpg",
"ingredients": [
{
"id": "en:sugar",
"rank": 1,
"text": "Sucre"
},
{
"rank": 2,
"id": "en:palm-oil",
"text": "huile de palme"
},
{
"rank": 3,
"id": "en:hazelnut",
"percent": "13",
"text": "_NOISETTES_"
},
{
"percent": "7.4",
"text": "cacao maigre",
"rank": 4,
"id": "fr:cacao-maigre"
},
{
"id": "en:pasteurized-skim-milk",
"rank": 5,
"text": "_LAIT_ écrémé en poudre",
"percent": "6.6"
},
{
"text": "_LACTOSÉRUM_ en poudre",
"id": "en:whey-powder",
"rank": 6
},
{
"text": "émulsifiants",
"id": "en:emulsifier",
"rank": 7
},
{
"text": "vanilline",
"rank": 8,
"id": "fr:vanilline"
},
{
"id": "en:lecithins",
"text": "lécithines"
},
{
"text": "_SOJA_",
"id": "en:soya"
}
],
"additives_prev_original_tags": [
"en:e322"
],
"last_modified_t": 1536902713
}
],
"page": 1,
"page_size": "5"
}
| [
"tchappui@gmail.com"
] | tchappui@gmail.com |
dccaabaea2ce82f404be9435d198c09cebb12357 | e01842bbd6fa4ec5df69e9dc3feda23f0085367c | /blog/posts/urls.py | 9e9f930a79c1eb0d80c0c12fe9aef76893311746 | [] | no_license | Greh/pyladies-gswd | cae38aa9f60e9d0307401e18f97390a3388d32d2 | a6c0cf555068fd4bc7cf8eaead8237e2d04d0f46 | refs/heads/master | 2021-01-20T22:51:08.683674 | 2013-08-24T08:53:39 | 2013-08-24T08:53:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.post_list_view, name='list'),
url(r'^read/(?P<slug>[-\w]+)/$', views.post_detail_view, name='detail'),
)
| [
"kenneth@gigantuan.net"
] | kenneth@gigantuan.net |
fec6355a19a54de67434605bd6c36ffc7fd76909 | 8633ec7985ffd7f849210b93bc20e632f8ae8707 | /tree/CMSSW_4_2_8_patch7/src/Validation/RecoTrack/python/PostProcessorTracker_cfi.py | 9d4bd7efb05784788385b3074371e74ca53102c8 | [] | no_license | liis/el_track | 2ed5b3b7a64d57473328df0e5faf28808bab6166 | cd7978e5fa95d653bab5825b940911b465172c1a | refs/heads/master | 2016-09-10T20:09:07.882261 | 2015-01-08T14:41:59 | 2015-01-08T14:41:59 | 14,494,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,530 | py | import FWCore.ParameterSet.Config as cms
postProcessorTrack = cms.EDAnalyzer("DQMGenericClient",
subDirs = cms.untracked.vstring("Tracking/Track/*"),
efficiency = cms.vstring(
"effic 'Efficiency vs #eta' num_assoc(simToReco)_eta num_simul_eta",
"efficPt 'Efficiency vs p_{T}' num_assoc(simToReco)_pT num_simul_pT",
"effic_vs_hit 'Efficiency vs hit' num_assoc(simToReco)_hit num_simul_hit",
"effic_vs_phi 'Efficiency vs #phi' num_assoc(simToReco)_phi num_simul_phi",
"effic_vs_dxy 'Efficiency vs Dxy' num_assoc(simToReco)_dxy num_simul_dxy",
"effic_vs_dz 'Efficiency vs Dz' num_assoc(simToReco)_dz num_simul_dz",
"effic_vs_vertpos 'Efficiency vs vertpos' num_assoc(simToReco)_vertpos num_simul_vertpos",
"effic_vs_zpos 'Efficiency vs zpos' num_assoc(simToReco)_zpos num_simul_zpos",
"fakerate 'Fake rate vs #eta' num_assoc(recoToSim)_eta num_reco_eta fake",
"fakeratePt 'Fake rate vs p_{T}' num_assoc(recoToSim)_pT num_reco_pT fake",
"fakerate_vs_hit 'Fake rate vs hit' num_assoc(recoToSim)_hit num_reco_hit fake",
"fakerate_vs_phi 'Fake rate vs phi' num_assoc(recoToSim)_phi num_reco_phi fake",
"fakerate_vs_dxy 'Fake rate vs dxy' num_assoc(recoToSim)_dxy num_reco_dxy fake",
"fakerate_vs_dz 'Fake rate vs dz' num_assoc(recoToSim)_dz num_reco_dz fake"
),
resolution = cms.vstring(
"cotThetares_vs_eta '#sigma(cot(#theta)) vs #eta' cotThetares_vs_eta",
"cotThetares_vs_pt '#sigma(cot(#theta)) vs p_{T}' cotThetares_vs_pt",
"h_dxypulleta 'd_{xy} Pull vs #eta' dxypull_vs_eta",
"dxyres_vs_eta '#sigma(d_{xy}) vs #eta' dxyres_vs_eta",
"dxyres_vs_pt '#sigma(d_{xy}) vs p_{T}' dxyres_vs_pt",
"h_dzpulleta 'd_{z} Pull vs #eta' dzpull_vs_eta",
"dzres_vs_eta '#sigma(d_{z}) vs #eta' dzres_vs_eta",
"dzres_vs_pt '#sigma(d_{z}) vs p_{T}' dzres_vs_pt",
"etares_vs_eta '#sigma(#eta) vs #eta' etares_vs_eta",
"h_phipulleta '#phi Pull vs #eta' phipull_vs_eta",
"h_phipullphi '#phi Pull vs #phi' phipull_vs_phi",
"phires_vs_eta '#sigma(#phi) vs #eta' phires_vs_eta",
"phires_vs_phi '#sigma(#phi) vs #phi' phires_vs_phi",
"phires_vs_pt '#sigma(#phi) vs p_{T}' phires_vs_pt",
"h_ptpulleta 'p_{T} Pull vs #eta' ptpull_vs_eta",
"h_ptpullphi 'p_{T} Pull vs #phi' ptpull_vs_phi",
"ptres_vs_eta '#sigma(p_{T}) vs #eta' ptres_vs_eta",
"ptres_vs_phi '#sigma(p_{T}) vs #phi' ptres_vs_phi",
"ptres_vs_pt '#sigma(p_{T}) vs p_{T}' ptres_vs_pt",
"h_thetapulleta '#theta Pull vs #eta' thetapull_vs_eta",
"h_thetapullphi '#theta Pull vs #phi' thetapull_vs_phi"
),
profile= cms.vstring(
"chi2mean 'mean #chi^{2} vs #eta' chi2_vs_eta",
"chi2mean_vs_phi 'mean #chi^{2} vs #phi' chi2_vs_phi",
"chi2mean_vs_nhits 'mean #chi^{2} vs n. hits' chi2_vs_nhits",
"hits_eta 'mean #hits vs eta' nhits_vs_eta",
"hits_phi 'mean #hits vs #phi' nhits_vs_phi",
"losthits_eta 'mean #lost hits vs #eta' nlosthits_vs_eta",
"PXBhits_eta 'mean # PXB hits vs #eta' nPXBhits_vs_eta",
"PXFhits_eta 'mean # PXF hits vs #eta' nPXFhits_vs_eta",
"TIBhits_eta 'mean # TIB hits vs #eta' nTIBhits_vs_eta",
"TIDhits_eta 'mean # TID hits vs #eta' nTIDhits_vs_eta",
"TOBhits_eta 'mean # TOB hits vs #eta' nTOBhits_vs_eta",
"TEChits_eta 'mean # TEC hits vs #eta' nTEChits_vs_eta",
"LayersWithMeas_eta 'mean # LayersWithMeas vs #eta' nLayersWithMeas_vs_eta",
"PXLlayersWith2dMeas 'mean # PXLlayersWithMeas vs #eta' nPXLlayersWith2dMeas",
"STRIPlayersWithMeas_eta 'mean # STRIPlayersWithMeas vs #eta' nSTRIPlayersWithMeas_eta",
"STRIPlayersWith1dMeas_eta 'mean # STRIPlayersWith1dMeas vs #eta' nSTRIPlayersWith1dMeas_eta",
"STRIPlayersWith2dMeas_eta 'mean # STRIPlayersWith2dMeas vs #eta' nSTRIPlayersWith2dMeas_eta"
),
outputFileName = cms.untracked.string("")
)
| [
"polaarrebane@gmail.com"
] | polaarrebane@gmail.com |
b4d5b5ebd165f661378a8add290298b1ad8bac5c | 36c00fe2afff4818c937e312ce0c6a79f35e2a77 | /7-kyu/dictionary-from-two-lists/python/solution.py | 4a710029d9177cbb5e6dccb8e00376800102be0b | [] | no_license | p-lots/codewars | 0a67b6ee4c91180ff78c648421b9d2d64463ddc3 | 535faeee475c6b398124d6f5002b0e111406e8bb | refs/heads/master | 2023-08-23T22:14:33.635011 | 2023-08-23T13:30:37 | 2023-08-23T13:30:37 | 195,320,309 | 0 | 0 | null | 2023-05-09T19:25:50 | 2019-07-05T01:40:15 | Python | UTF-8 | Python | false | false | 132 | py | def createDict(keys, values):
while len(values) < len(keys):
values.append(None)
return dict(zip(keys, values))
| [
"paul.calotta@gmail.com"
] | paul.calotta@gmail.com |
a39e9bab1aa016cf4f51db6b1ddcf7edd3ba6b25 | 4d9bdc1444ab73858a123b8273b72e1d74a9233d | /funNLearn/src/main/java/dsAlgo/sort/BucketSort.py | cddef4a09f7d8aba57c892f73c9bfb70035b2b75 | [] | no_license | vishalpmittal/practice-fun | f7ca1389d758f93ddf2ddc3a58f2592b7caabab4 | 727dec2e23e765925a5e7e003fc99aeaf25111e9 | refs/heads/master | 2022-07-11T18:31:49.574410 | 2022-02-26T23:05:12 | 2022-02-26T23:05:12 | 51,132,794 | 0 | 1 | null | 2022-06-29T19:34:05 | 2016-02-05T07:34:32 | JavaScript | UTF-8 | Python | false | false | 1,184 | py | """
Tag: sort
"""
from typing import List
class Solution:
def insertionSort(self, B: List[int]) -> List[int]:
for i in range(1, len(B)):
up = B[i]
j = i - 1
while j >= 0 and B[j] > up:
B[j + 1] = B[j]
j -= 1
B[j + 1] = up
return B
def bucket_sort(self, A: List[int]) -> List[int]:
arr = []
slot_num = 10 # 10 means 10 slots, each
# slot's size is 0.1
for i in range(slot_num):
arr.append([])
# Put array elements in different buckets
for j in A:
index_b = int(slot_num * j)
arr[index_b].append(j)
# Sort individual buckets
for i in range(slot_num):
arr[i] = self.insertionSort(arr[i])
# concatenate the result
k = 0
for i in range(slot_num):
for j in range(len(arr[i])):
A[k] = arr[i][j]
k += 1
return A
assert Solution().bucket_sort([0.897, 0.565, 0.656, 0.1234, 0.665, 0.3434]) == [
0.1234,
0.3434,
0.565,
0.656,
0.665,
0.897,
]
print("Tests Passed!")
| [
"vishalm@vmware.com"
] | vishalm@vmware.com |
10b1c8f6d365f2b3cdbf5aba30dd342bfdf97743 | a8ca62991d552367831daf302833053dec847d1b | /data_structures/python/tree.py | f4b902f99075ec6af63bc84c56e5f2a913b9792c | [] | no_license | krinj/tech-interview-kit | fec0d6e192ee4c3226602c840fcf650e7e10e726 | 86967a9237dd465dbeb9f6ada896eeceae6d553c | refs/heads/master | 2022-01-10T02:57:22.632021 | 2019-05-31T14:02:02 | 2019-05-31T14:02:02 | 185,377,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # -*- coding: utf-8 -*-
"""
Python binary tree.
"""
from typing import Union
__author__ = "Jakrin Juangbhanich"
__email__ = "juangbhanich.k@gmail.com"
class TNode:
def __init__(self, key: Union[str, int, float], data=None):
self.left: TNode = None
self.right: TNode = None
self.key: Union[str, int, float] = key
self.data = data
class Tree:
def __init__(self):
pass
| [
"juangbhanich.k@gmail.com"
] | juangbhanich.k@gmail.com |
019239c764dc2fe2ed066beb136ef05b5afe9741 | 3d37f595a8aaaa7c5723ddbd6758ecac5147dce2 | /evaluate-reverse-polish-notation/evaluate-reverse-polish-notation.py | b2e0ca728623d1edf0e18e6502be0e0b436af60b | [] | no_license | baggy2797/Leetcode | ec218b155ebb972cd793253f25c3e18117216703 | 469c1541579401768f7a1da55d504a9e8656b21e | refs/heads/main | 2023-06-24T17:03:42.708935 | 2021-07-16T22:31:24 | 2021-07-16T22:31:24 | 342,979,700 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | class Solution:
def evalRPN(self, tokens: List[str]) -> int:
stack = []
# operators = {"+","-","*","/"}
for token in tokens:
if token[-1].isdigit():
stack.append(int(token))
# elif token in operators:
else:
second = stack.pop()
first = stack.pop()
if token == "+":
stack.append(first + second)
elif token == "-":
stack.append(first - second)
elif token == "*":
stack.append(first * second)
else:
stack.append(int(float(first) / second))
return stack.pop() | [
"bhagwataditya226@gmail.com"
] | bhagwataditya226@gmail.com |
86b48d312e17d3cac282ad980fb749a93edd134f | 9209fd8190d0d8a00fadaead164340739a61353a | /tests/test_plugins.py | 2e8d52f821c6f1586c0891882e0b7419f0709888 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | adamwight/coveragepy | 6add3ae9b1bef53aa6a2322c03b2e4bb6927fc3f | 820cb6b9a93595e94e4519e7d54003504ab64814 | refs/heads/master | 2021-09-06T00:04:12.380411 | 2018-01-23T18:29:59 | 2018-01-23T18:29:59 | 119,743,780 | 0 | 0 | null | 2018-01-31T21:10:35 | 2018-01-31T21:10:35 | null | UTF-8 | Python | false | false | 31,806 | py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for plugins."""
import os.path
import coverage
from coverage import env
from coverage.backward import StringIO
from coverage.control import Plugins
from coverage.misc import CoverageException
import coverage.plugin
from tests.coveragetest import CoverageTest
from tests.helpers import CheckUniqueFilenames
class FakeConfig(object):
"""A fake config for use in tests."""
def __init__(self, plugin, options):
self.plugin = plugin
self.options = options
self.asked_for = []
def get_plugin_options(self, module):
"""Just return the options for `module` if this is the right module."""
self.asked_for.append(module)
if module == self.plugin:
return self.options
else:
return {}
class LoadPluginsTest(CoverageTest):
"""Test Plugins.load_plugins directly."""
def test_implicit_boolean(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
config = FakeConfig("plugin1", {})
plugins = Plugins.load_plugins([], config)
self.assertFalse(plugins)
plugins = Plugins.load_plugins(["plugin1"], config)
self.assertTrue(plugins)
def test_importing_and_configuring(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
self.this_is = "me"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
config = FakeConfig("plugin1", {'a': 'hello'})
plugins = list(Plugins.load_plugins(["plugin1"], config))
self.assertEqual(len(plugins), 1)
self.assertEqual(plugins[0].this_is, "me")
self.assertEqual(plugins[0].options, {'a': 'hello'})
self.assertEqual(config.asked_for, ['plugin1'])
def test_importing_and_configuring_more_than_one(self):
self.make_file("plugin1.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
self.this_is = "me"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
self.make_file("plugin2.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
def __init__(self, options):
self.options = options
def coverage_init(reg, options):
reg.add_file_tracer(Plugin(options))
""")
config = FakeConfig("plugin1", {'a': 'hello'})
plugins = list(Plugins.load_plugins(["plugin1", "plugin2"], config))
self.assertEqual(len(plugins), 2)
self.assertEqual(plugins[0].this_is, "me")
self.assertEqual(plugins[0].options, {'a': 'hello'})
self.assertEqual(plugins[1].options, {})
self.assertEqual(config.asked_for, ['plugin1', 'plugin2'])
# The order matters...
config = FakeConfig("plugin1", {'a': 'second'})
plugins = list(Plugins.load_plugins(["plugin2", "plugin1"], config))
self.assertEqual(len(plugins), 2)
self.assertEqual(plugins[0].options, {})
self.assertEqual(plugins[1].this_is, "me")
self.assertEqual(plugins[1].options, {'a': 'second'})
def test_cant_import(self):
with self.assertRaises(ImportError):
_ = Plugins.load_plugins(["plugin_not_there"], None)
def test_plugin_must_define_coverage_init(self):
self.make_file("no_plugin.py", """\
from coverage import CoveragePlugin
Nothing = 0
""")
msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function"
with self.assertRaisesRegex(CoverageException, msg_pat):
list(Plugins.load_plugins(["no_plugin"], None))
class PluginTest(CoverageTest):
"""Test plugins through the Coverage class."""
def test_plugin_imported(self):
# Prove that a plugin will be imported.
self.make_file("my_plugin.py", """\
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(Plugin())
with open("evidence.out", "w") as f:
f.write("we are here!")
""")
self.assert_doesnt_exist("evidence.out")
cov = coverage.Coverage()
cov.set_option("run:plugins", ["my_plugin"])
cov.start()
cov.stop() # pragma: nested
with open("evidence.out") as f:
self.assertEqual(f.read(), "we are here!")
def test_missing_plugin_raises_import_error(self):
# Prove that a missing plugin will raise an ImportError.
with self.assertRaises(ImportError):
cov = coverage.Coverage()
cov.set_option("run:plugins", ["does_not_exist_woijwoicweo"])
cov.start()
cov.stop()
def test_bad_plugin_isnt_hidden(self):
# Prove that a plugin with an error in it will raise the error.
self.make_file("plugin_over_zero.py", """\
1/0
""")
with self.assertRaises(ZeroDivisionError):
cov = coverage.Coverage()
cov.set_option("run:plugins", ["plugin_over_zero"])
cov.start()
cov.stop()
def test_plugin_sys_info(self):
self.make_file("plugin_sys_info.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def sys_info(self):
return [("hello", "world")]
def coverage_init(reg, options):
reg.add_noop(Plugin())
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=["sys"])
cov._debug_file = debug_out
cov.set_option("run:plugins", ["plugin_sys_info"])
cov.load()
out_lines = debug_out.getvalue().splitlines()
expected_end = [
"-- sys: plugin_sys_info.Plugin -------------------------------",
" hello: world",
"-- end -------------------------------------------------------",
]
self.assertEqual(expected_end, out_lines[-len(expected_end):])
def test_plugin_with_no_sys_info(self):
self.make_file("plugin_no_sys_info.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(Plugin())
""")
debug_out = StringIO()
cov = coverage.Coverage(debug=["sys"])
cov._debug_file = debug_out
cov.set_option("run:plugins", ["plugin_no_sys_info"])
cov.load()
out_lines = debug_out.getvalue().splitlines()
expected_end = [
"-- sys: plugin_no_sys_info.Plugin ----------------------------",
"-- end -------------------------------------------------------",
]
self.assertEqual(expected_end, out_lines[-len(expected_end):])
def test_local_files_are_importable(self):
self.make_file("importing_plugin.py", """\
from coverage import CoveragePlugin
import local_module
class MyPlugin(CoveragePlugin):
pass
def coverage_init(reg, options):
reg.add_noop(MyPlugin())
""")
self.make_file("local_module.py", "CONST = 1")
self.make_file(".coveragerc", """\
[run]
plugins = importing_plugin
""")
self.make_file("main_file.py", "print('MAIN')")
out = self.run_command("coverage run main_file.py")
self.assertEqual(out, "MAIN\n")
out = self.run_command("coverage html")
self.assertEqual(out, "")
class PluginWarningOnPyTracer(CoverageTest):
"""Test that we get a controlled exception with plugins on PyTracer."""
def test_exception_if_plugins_on_pytracer(self):
if env.C_TRACER:
self.skipTest("This test is only about PyTracer.")
self.make_file("simple.py", """a = 1""")
cov = coverage.Coverage()
cov.set_option("run:plugins", ["tests.plugin1"])
expected_warnings = [
r"Plugin file tracers \(tests.plugin1.Plugin\) aren't supported with PyTracer",
]
with self.assert_warnings(cov, expected_warnings):
self.start_import_stop(cov, "simple")
class FileTracerTest(CoverageTest):
"""Tests of plugins that implement file_tracer."""
def setUp(self):
super(FileTracerTest, self).setUp()
if not env.C_TRACER:
self.skipTest("Plugins are only supported with the C tracer.")
class GoodFileTracerTest(FileTracerTest):
"""Tests of file tracer plugin happy paths."""
def test_plugin1(self):
self.make_file("simple.py", """\
import try_xyz
a = 1
b = 2
""")
self.make_file("try_xyz.py", """\
c = 3
d = 4
""")
cov = coverage.Coverage()
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin1"])
# Import the Python file, executing it.
self.start_import_stop(cov, "simple")
_, statements, missing, _ = cov.analysis("simple.py")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [])
zzfile = os.path.abspath(os.path.join("/src", "try_ABC.zz"))
_, statements, _, _ = cov.analysis(zzfile)
self.assertEqual(statements, [105, 106, 107, 205, 206, 207])
def make_render_and_caller(self):
"""Make the render.py and caller.py files we need."""
# plugin2 emulates a dynamic tracing plugin: the caller's locals
# are examined to determine the source file and line number.
# The plugin is in tests/plugin2.py.
self.make_file("render.py", """\
def render(filename, linenum):
# This function emulates a template renderer. The plugin
# will examine the `filename` and `linenum` locals to
# determine the source file and line number.
fiddle_around = 1 # not used, just chaff.
return "[{0} @ {1}]".format(filename, linenum)
def helper(x):
# This function is here just to show that not all code in
# this file will be part of the dynamic tracing.
return x+1
""")
self.make_file("caller.py", """\
import sys
from render import helper, render
assert render("foo_7.html", 4) == "[foo_7.html @ 4]"
# Render foo_7.html again to try the CheckUniqueFilenames asserts.
render("foo_7.html", 4)
assert helper(42) == 43
assert render("bar_4.html", 2) == "[bar_4.html @ 2]"
assert helper(76) == 77
# quux_5.html will be omitted from the results.
assert render("quux_5.html", 3) == "[quux_5.html @ 3]"
# For Python 2, make sure unicode is working.
assert render(u"uni_3.html", 2) == "[uni_3.html @ 2]"
""")
# will try to read the actual source files, so make some
# source files.
def lines(n):
"""Make a string with n lines of text."""
return "".join("line %d\n" % i for i in range(n))
self.make_file("bar_4.html", lines(4))
self.make_file("foo_7.html", lines(7))
def test_plugin2(self):
self.make_render_and_caller()
cov = coverage.Coverage(omit=["*quux*"])
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
# The way plugin2 works, a file named foo_7.html will be claimed to
# have 7 lines in it. If render() was called with line number 4,
# then the plugin will claim that lines 4 and 5 were executed.
_, statements, missing, _ = cov.analysis("foo_7.html")
self.assertEqual(statements, [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(missing, [1, 2, 3, 6, 7])
self.assertIn("foo_7.html", cov.data.line_counts())
_, statements, missing, _ = cov.analysis("bar_4.html")
self.assertEqual(statements, [1, 2, 3, 4])
self.assertEqual(missing, [1, 4])
self.assertIn("bar_4.html", cov.data.line_counts())
self.assertNotIn("quux_5.html", cov.data.line_counts())
_, statements, missing, _ = cov.analysis("uni_3.html")
self.assertEqual(statements, [1, 2, 3])
self.assertEqual(missing, [1])
self.assertIn("uni_3.html", cov.data.line_counts())
def test_plugin2_with_branch(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
CheckUniqueFilenames.hook(cov, '_should_trace')
CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
# The way plugin2 works, a file named foo_7.html will be claimed to
# have 7 lines in it. If render() was called with line number 4,
# then the plugin will claim that lines 4 and 5 were executed.
analysis = cov._analyze("foo_7.html")
self.assertEqual(analysis.statements, set([1, 2, 3, 4, 5, 6, 7]))
# Plugins don't do branch coverage yet.
self.assertEqual(analysis.has_arcs(), True)
self.assertEqual(analysis.arc_possibilities(), [])
self.assertEqual(analysis.missing, set([1, 2, 3, 6, 7]))
def test_plugin2_with_text_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
repout = StringIO()
total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"], show_missing=True)
report = repout.getvalue().splitlines()
expected = [
'Name Stmts Miss Branch BrPart Cover Missing',
'--------------------------------------------------------',
'bar_4.html 4 2 0 0 50% 1, 4',
'foo_7.html 7 5 0 0 29% 1-3, 6-7',
'--------------------------------------------------------',
'TOTAL 11 7 0 0 36%',
]
self.assertEqual(report, expected)
self.assertAlmostEqual(total, 36.36, places=2)
def test_plugin2_with_html_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
total = cov.html_report(include=["*.html"], omit=["uni*.html"])
self.assertAlmostEqual(total, 36.36, places=2)
self.assert_exists("htmlcov/index.html")
self.assert_exists("htmlcov/bar_4_html.html")
self.assert_exists("htmlcov/foo_7_html.html")
def test_plugin2_with_xml_report(self):
self.make_render_and_caller()
cov = coverage.Coverage(branch=True, omit=["*quux*"])
cov.set_option("run:plugins", ["tests.plugin2"])
self.start_import_stop(cov, "caller")
total = cov.xml_report(include=["*.html"], omit=["uni*.html"])
self.assertAlmostEqual(total, 36.36, places=2)
with open("coverage.xml") as fxml:
xml = fxml.read()
for snip in [
'filename="bar_4.html" line-rate="0.5" name="bar_4.html"',
'filename="foo_7.html" line-rate="0.2857" name="foo_7.html"',
]:
self.assertIn(snip, xml)
def test_defer_to_python(self):
# A plugin that measures, but then wants built-in python reporting.
self.make_file("fairly_odd_plugin.py", """\
# A plugin that claims all the odd lines are executed, and none of
# the even lines, and then punts reporting off to the built-in
# Python reporting.
import coverage.plugin
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
return OddTracer(filename)
def file_reporter(self, filename):
return "python"
class OddTracer(coverage.plugin.FileTracer):
def __init__(self, filename):
self.filename = filename
def source_filename(self):
return self.filename
def line_number_range(self, frame):
lineno = frame.f_lineno
if lineno % 2:
return (lineno, lineno)
else:
return (-1, -1)
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.make_file("unsuspecting.py", """\
a = 1
b = 2
c = 3
d = 4
e = 5
f = 6
""")
cov = coverage.Coverage(include=["unsuspecting.py"])
cov.set_option("run:plugins", ["fairly_odd_plugin"])
self.start_import_stop(cov, "unsuspecting")
repout = StringIO()
total = cov.report(file=repout, show_missing=True)
report = repout.getvalue().splitlines()
expected = [
'Name Stmts Miss Cover Missing',
'-----------------------------------------------',
'unsuspecting.py 6 3 50% 2, 4, 6',
]
self.assertEqual(report, expected)
self.assertEqual(total, 50)
def test_find_unexecuted(self):
self.make_file("unexecuted_plugin.py", """\
import os
import coverage.plugin
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("foo.py"):
return MyTracer(filename)
def file_reporter(self, filename):
return MyReporter(filename)
def find_executable_files(self, src_dir):
# Check that src_dir is the right value
files = os.listdir(src_dir)
assert "foo.py" in files
assert "unexecuted_plugin.py" in files
return ["chimera.py"]
class MyTracer(coverage.plugin.FileTracer):
def __init__(self, filename):
self.filename = filename
def source_filename(self):
return self.filename
def line_number_range(self, frame):
return (999, 999)
class MyReporter(coverage.FileReporter):
def lines(self):
return set([99, 999, 9999])
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.make_file("foo.py", "a = 1\n")
cov = coverage.Coverage(source=['.'])
cov.set_option("run:plugins", ["unexecuted_plugin"])
self.start_import_stop(cov, "foo")
# The file we executed claims to have run line 999.
_, statements, missing, _ = cov.analysis("foo.py")
self.assertEqual(statements, [99, 999, 9999])
self.assertEqual(missing, [99, 9999])
# The completely missing file is in the results.
_, statements, missing, _ = cov.analysis("chimera.py")
self.assertEqual(statements, [99, 999, 9999])
self.assertEqual(missing, [99, 999, 9999])
# But completely new filenames are not in the results.
self.assertEqual(len(cov.get_data().measured_files()), 3)
with self.assertRaises(CoverageException):
cov.analysis("fictional.py")
class BadFileTracerTest(FileTracerTest):
"""Test error handling around file tracer plugins."""
def run_plugin(self, module_name):
"""Run a plugin with the given module_name.
Uses a few fixed Python files.
Returns the Coverage object.
"""
self.make_file("simple.py", """\
import other, another
a = other.f(2)
b = other.f(3)
c = another.g(4)
d = another.g(5)
""")
# The names of these files are important: some plugins apply themselves
# to "*other.py".
self.make_file("other.py", """\
def f(x):
return x+1
""")
self.make_file("another.py", """\
def g(x):
return x-1
""")
cov = coverage.Coverage()
cov.set_option("run:plugins", [module_name])
self.start_import_stop(cov, "simple")
return cov
def run_bad_plugin(self, module_name, plugin_name, our_error=True, excmsg=None, excmsgs=None):
"""Run a file, and see that the plugin failed.
`module_name` and `plugin_name` is the module and name of the plugin to
use.
`our_error` is True if the error reported to the user will be an
explicit error in our test code, marked with an '# Oh noes!' comment.
`excmsg`, if provided, is text that must appear in the stderr.
`excmsgs`, if provided, is a list of messages, one of which must
appear in the stderr.
The plugin will be disabled, and we check that a warning is output
explaining why.
"""
self.run_plugin(module_name)
stderr = self.stderr()
if our_error:
errors = stderr.count("# Oh noes!")
# The exception we're causing should only appear once.
self.assertEqual(errors, 1)
# There should be a warning explaining what's happening, but only one.
# The message can be in two forms:
# Disabling plug-in '...' due to previous exception
# or:
# Disabling plug-in '...' due to an exception:
msg = "Disabling plug-in '%s.%s' due to " % (module_name, plugin_name)
warnings = stderr.count(msg)
self.assertEqual(warnings, 1)
if excmsg:
self.assertIn(excmsg, stderr)
if excmsgs:
self.assertTrue(any(em in stderr for em in excmsgs), "expected one of %r" % excmsgs)
def test_file_tracer_has_no_file_tracer_method(self):
self.make_file("bad_plugin.py", """\
class Plugin(object):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_file_tracer_has_inherited_sourcefilename_method(self):
self.make_file("bad_plugin.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
# Just grab everything.
return FileTracer()
class FileTracer(coverage.FileTracer):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin(
"bad_plugin", "Plugin", our_error=False,
excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()",
)
def test_plugin_has_inherited_filereporter_method(self):
self.make_file("bad_plugin.py", """\
import coverage
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
# Just grab everything.
return FileTracer()
class FileTracer(coverage.FileTracer):
def source_filename(self):
return "foo.xxx"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
cov = self.run_plugin("bad_plugin")
expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()"
with self.assertRaisesRegex(NotImplementedError, expected_msg):
cov.report()
def test_file_tracer_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
17/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_file_tracer_returns_wrong(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return 3.14159
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin(
"bad_plugin", "Plugin", our_error=False, excmsg="'float' object has no attribute",
)
def test_has_dynamic_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def has_dynamic_source_filename(self):
23/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
42/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_source_filename_returns_wrong(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return 17.3
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin(
"bad_plugin", "Plugin", our_error=False,
excmsgs=[
"expected str, bytes or os.PathLike object, not float",
"'float' object has no attribute",
"object of type 'float' has no len()",
"'float' object is unsubscriptable",
],
)
def test_dynamic_source_filename_fails(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def has_dynamic_source_filename(self):
return True
def dynamic_source_filename(self, filename, frame):
101/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin("bad_plugin", "Plugin")
def test_line_number_range_returns_non_tuple(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return 42.23
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin(
"bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple",
)
def test_line_number_range_returns_triple(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return (1, 2, 3)
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin(
"bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple",
)
def test_line_number_range_returns_pair_of_strings(self):
self.make_file("bad_plugin.py", """\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return ("5", "7")
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""")
self.run_bad_plugin(
"bad_plugin", "Plugin", our_error=False, excmsg="an integer is required",
)
class ConfigurerPluginTest(CoverageTest):
"""Test configuring plugins."""
run_in_temp_dir = False
def test_configurer_plugin(self):
cov = coverage.Coverage()
cov.set_option("run:plugins", ["tests.plugin_config"])
cov.start()
cov.stop() # pragma: nested
excluded = cov.get_option("report:exclude_lines")
self.assertIn("pragma: custom", excluded)
self.assertIn("pragma: or whatever", excluded)
| [
"ned@nedbatchelder.com"
] | ned@nedbatchelder.com |
2b3452773a594169640f1364f39a01c29f048d9b | 34b55781ae90e1f268ce74ee27ecd4909663b728 | /freqlearn/images/draw_impulses.py | 2c1aa223d0c864e568b0e4078afc6057ad1863ba | [] | no_license | fbcotter/thesis | 28ecac0d84f1a5ff9ea104e8e1ac7753beba1c51 | e1a4032ffd6d241694fc173d67051798db22f20d | refs/heads/master | 2023-03-28T14:02:09.835895 | 2020-06-12T09:56:04 | 2020-06-12T09:56:04 | 269,305,306 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | # coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
from math import *
import dtcwt
xfm = dtcwt.Transform2d('near_sym_b', 'qshift_b')
x = np.zeros((128,64))
p = xfm.forward(x, nlevels=2)
m = p.highpasses[1].shape[0] // 2
r = int(.8 * m)
fig, ax = plt.subplots()
w = np.array([-1j, 1j, -1j, -1, 1, -1], 'complex')
for l in range(6):
if l < 3:
theta = 15+30*l
else:
theta = 15+30*l - 180
p.highpasses[1][int(m-r*sin(theta*pi/180)), int(r*cos(theta*pi/180)), l] = w[l]
y = xfm.inverse(p)
ax.imshow(y, cmap='gray')
m = y.shape[0] // 2
r = int(.88 * m)
for l in range(6):
if l < 3:
theta = 15+30*l
else:
theta = 15+30*l - 180
y = int(m - r*sin(theta*pi/180))
x = int(r*cos(theta*pi/180))
plt.text(x,y,"{}{}".format(theta, r"$^{\circ}$"), color='b', fontsize=11)
plt.show()
| [
"fbcotter90@gmail.com"
] | fbcotter90@gmail.com |
9c473691dcc0f4e2dc4c0c3a0135eb5eca24fded | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/layout/scene/yaxis/_dtick.py | 508da58d48cdfe7cca997d9b666a67c6367abde3 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 477 | py | import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="dtick", parent_name="layout.scene.yaxis", **kwargs):
super(DtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
**kwargs
)
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
809fd116ee34ba89dc9e99b031eb996a76bab7db | ee9840398bec5750aed10f6ac0095a9238f243f6 | /paypal/request.py | c4f738f2a76716f51909276489792190718f9f34 | [] | no_license | fortable1999/paypal-preapproval-payment | 85881f5cd1307e05b20e1a9421b7b3c7a516c907 | 4a10a5a9909d55822279138ef998f1d601005101 | refs/heads/master | 2021-01-06T20:37:40.708487 | 2014-07-13T08:58:21 | 2014-07-13T08:58:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | """
Paypal API Wrapper
"""
| [
"fortable1999@gmail.com"
] | fortable1999@gmail.com |
be9dbe69c82f3c7fcfb4be1760bb03d41e845213 | f121695e2dff353607fa47fb42482470e03bbf8a | /capitulo_19-Contas_de_usuario/learning_log/learning_logs/migrations/0003_topic_owner.py | 9b0e4de5a7cad3f304adda9c7ccdd8dc8ebadeb9 | [] | no_license | ranog/python_work | 76cbcf784c86fae4482be5383223e4b0a34f4130 | 47c442a90dcf32d5aef70858693a772a3c76a7ac | refs/heads/master | 2022-12-22T11:02:26.482059 | 2021-04-17T01:12:22 | 2021-04-17T01:12:22 | 233,634,221 | 2 | 1 | null | 2022-12-08T07:38:43 | 2020-01-13T15:58:46 | Python | UTF-8 | Python | false | false | 602 | py | # Generated by Django 3.1.4 on 2021-01-23 15:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('learning_logs', '0002_entry'),
]
operations = [
migrations.AddField(
model_name='topic',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
preserve_default=False,
),
]
| [
"jprnogueira@yahoo.com.br"
] | jprnogueira@yahoo.com.br |
0cedadd55b1e6d9d6b10d6d76f215c3480001304 | c2636c8bb8964ed2d6c8eca6c922dea27ef597e8 | /main.py | 26767768b29e108b2f1cd7bd3429822c71ab9629 | [] | no_license | rafaeltorrese/webscrapperplatzi | 97db94c5db5f919d198a4aa266aba4ed42a08d73 | f692f18ad17740d4a506a9d560a61180bad1b8ea | refs/heads/master | 2022-12-14T08:37:24.353294 | 2020-09-04T22:53:05 | 2020-09-04T22:53:05 | 292,951,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | import argparse
import logging
import re
from requests.exceptions import HTTPError
from urllib3.exceptions import MaxRetryError
from common import config
import news_page_object as news
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
is_well_formed_link = re.compile(r'^https?://.+/.+$') # https://example.com/hello
is_root_path = re.compile(r'^/.+$') # /some-text
def _news_scraper(news_site_uid):
host = config()["news_site"][news_site_uid]["url"]
logging.info(f"Beginning scraper for {host}")
homepage = news.HomePage(news_site_uid, host)
articles = []
for link in homepage.article_links:
article = _fetch_article(news_site_uid, host, link)
if article:
logger.info("Article fetched!!")
articles.append(article)
print(article.title)
print(len(articles))
def _fetch_article(news_site_uid, host, link):
logger.info(f"Start fetching article at {link}")
article = None
try:
article = news.ArticlePage(news_site_uid, _build_link(host, link))
except (HTTPError, MaxRetryError) as e:
logger.warning("Error while fetching the article", exc_info=False)
if article and not article.body:
logger.warning("Invalid article. There is no body")
return None
return article
def _build_link(host, link):
if is_well_formed_link.match(link):
return link
elif is_root_path.match(link):
return f"{host}{link}"
else:
return f"{host}/{link}"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
news_site_choices = list(config()['news_site'].keys()) # list of keys
parser.add_argument("news_site",
help="The news site you want tow scrape",
type=str,
choices=news_site_choices)
args = parser.parse_args()
_news_scraper(args.news_site)
| [
"torresc.rafael@gmail.com"
] | torresc.rafael@gmail.com |
12c9a312ab2ce470cc22f2e9db120ed67d6bfd2f | e1dd0997239951d4d459b1ba0229493512b0b331 | /mds_py/mds-env/lib/python3.11/site-packages/redis/commands/search/commands.py | f02805ee2b2340b3039d8d19e0cf6c788605ab0e | [] | no_license | alexmy21/Octopus | bd17777cf66654c1e7959654f63ca82b716865b5 | 7844ec616376ec6cd9c1a8b73dbcad9c729557ae | refs/heads/master | 2022-12-22T22:42:29.473433 | 2022-12-21T16:52:09 | 2022-12-21T16:52:09 | 61,543,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,600 | py | import itertools
import time
from typing import Dict, Optional, Union
from redis.client import Pipeline
from redis.utils import deprecated_function
from ..helpers import parse_to_dict
from ._util import to_string
from .aggregation import AggregateRequest, AggregateResult, Cursor
from .document import Document
from .query import Query
from .result import Result
from .suggestion import SuggestionParser
NUMERIC = "NUMERIC"
CREATE_CMD = "FT.CREATE"
ALTER_CMD = "FT.ALTER"
SEARCH_CMD = "FT.SEARCH"
ADD_CMD = "FT.ADD"
ADDHASH_CMD = "FT.ADDHASH"
DROP_CMD = "FT.DROP"
DROPINDEX_CMD = "FT.DROPINDEX"
EXPLAIN_CMD = "FT.EXPLAIN"
EXPLAINCLI_CMD = "FT.EXPLAINCLI"
DEL_CMD = "FT.DEL"
AGGREGATE_CMD = "FT.AGGREGATE"
PROFILE_CMD = "FT.PROFILE"
CURSOR_CMD = "FT.CURSOR"
SPELLCHECK_CMD = "FT.SPELLCHECK"
DICT_ADD_CMD = "FT.DICTADD"
DICT_DEL_CMD = "FT.DICTDEL"
DICT_DUMP_CMD = "FT.DICTDUMP"
GET_CMD = "FT.GET"
MGET_CMD = "FT.MGET"
CONFIG_CMD = "FT.CONFIG"
TAGVALS_CMD = "FT.TAGVALS"
ALIAS_ADD_CMD = "FT.ALIASADD"
ALIAS_UPDATE_CMD = "FT.ALIASUPDATE"
ALIAS_DEL_CMD = "FT.ALIASDEL"
INFO_CMD = "FT.INFO"
SUGADD_COMMAND = "FT.SUGADD"
SUGDEL_COMMAND = "FT.SUGDEL"
SUGLEN_COMMAND = "FT.SUGLEN"
SUGGET_COMMAND = "FT.SUGGET"
SYNUPDATE_CMD = "FT.SYNUPDATE"
SYNDUMP_CMD = "FT.SYNDUMP"
NOOFFSETS = "NOOFFSETS"
NOFIELDS = "NOFIELDS"
NOHL = "NOHL"
NOFREQS = "NOFREQS"
MAXTEXTFIELDS = "MAXTEXTFIELDS"
TEMPORARY = "TEMPORARY"
STOPWORDS = "STOPWORDS"
SKIPINITIALSCAN = "SKIPINITIALSCAN"
WITHSCORES = "WITHSCORES"
FUZZY = "FUZZY"
WITHPAYLOADS = "WITHPAYLOADS"
class SearchCommands:
"""Search commands."""
def batch_indexer(self, chunk_size=100):
"""
Create a new batch indexer from the client with a given chunk size
"""
return self.BatchIndexer(self, chunk_size=chunk_size)
def create_index(
self,
fields,
no_term_offsets=False,
no_field_flags=False,
stopwords=None,
definition=None,
max_text_fields=False,
temporary=None,
no_highlight=False,
no_term_frequencies=False,
skip_initial_scan=False,
):
"""
Create the search index. The index must not already exist.
### Parameters:
- **fields**: a list of TextField or NumericField objects
- **no_term_offsets**: If true, we will not save term offsets in
the index
- **no_field_flags**: If true, we will not save field flags that
allow searching in specific fields
- **stopwords**: If not None, we create the index with this custom
stopword list. The list can be empty
- **max_text_fields**: If true, we will encode indexes as if there
were more than 32 text fields which allows you to add additional
fields (beyond 32).
- **temporary**: Create a lightweight temporary index which will
expire after the specified period of inactivity (in seconds). The
internal idle timer is reset whenever the index is searched or added to.
- **no_highlight**: If true, disabling highlighting support.
Also implied by no_term_offsets.
- **no_term_frequencies**: If true, we avoid saving the term frequencies
in the index.
- **skip_initial_scan**: If true, we do not scan and index.
For more information see `FT.CREATE <https://redis.io/commands/ft.create>`_.
""" # noqa
args = [CREATE_CMD, self.index_name]
if definition is not None:
args += definition.args
if max_text_fields:
args.append(MAXTEXTFIELDS)
if temporary is not None and isinstance(temporary, int):
args.append(TEMPORARY)
args.append(temporary)
if no_term_offsets:
args.append(NOOFFSETS)
if no_highlight:
args.append(NOHL)
if no_field_flags:
args.append(NOFIELDS)
if no_term_frequencies:
args.append(NOFREQS)
if skip_initial_scan:
args.append(SKIPINITIALSCAN)
if stopwords is not None and isinstance(stopwords, (list, tuple, set)):
args += [STOPWORDS, len(stopwords)]
if len(stopwords) > 0:
args += list(stopwords)
args.append("SCHEMA")
try:
args += list(itertools.chain(*(f.redis_args() for f in fields)))
except TypeError:
args += fields.redis_args()
return self.execute_command(*args)
def alter_schema_add(self, fields):
"""
Alter the existing search index by adding new fields. The index
must already exist.
### Parameters:
- **fields**: a list of Field objects to add for the index
For more information see `FT.ALTER <https://redis.io/commands/ft.alter>`_.
""" # noqa
args = [ALTER_CMD, self.index_name, "SCHEMA", "ADD"]
try:
args += list(itertools.chain(*(f.redis_args() for f in fields)))
except TypeError:
args += fields.redis_args()
return self.execute_command(*args)
def dropindex(self, delete_documents=False):
"""
Drop the index if it exists.
Replaced `drop_index` in RediSearch 2.0.
Default behavior was changed to not delete the indexed documents.
### Parameters:
- **delete_documents**: If `True`, all documents will be deleted.
For more information see `FT.DROPINDEX <https://redis.io/commands/ft.dropindex>`_.
""" # noqa
delete_str = "DD" if delete_documents else ""
return self.execute_command(DROPINDEX_CMD, self.index_name, delete_str)
def _add_document(
self,
doc_id,
conn=None,
nosave=False,
score=1.0,
payload=None,
replace=False,
partial=False,
language=None,
no_create=False,
**fields,
):
"""
Internal add_document used for both batch and single doc indexing
"""
if partial or no_create:
replace = True
args = [ADD_CMD, self.index_name, doc_id, score]
if nosave:
args.append("NOSAVE")
if payload is not None:
args.append("PAYLOAD")
args.append(payload)
if replace:
args.append("REPLACE")
if partial:
args.append("PARTIAL")
if no_create:
args.append("NOCREATE")
if language:
args += ["LANGUAGE", language]
args.append("FIELDS")
args += list(itertools.chain(*fields.items()))
if conn is not None:
return conn.execute_command(*args)
return self.execute_command(*args)
def _add_document_hash(
self, doc_id, conn=None, score=1.0, language=None, replace=False
):
"""
Internal add_document_hash used for both batch and single doc indexing
"""
args = [ADDHASH_CMD, self.index_name, doc_id, score]
if replace:
args.append("REPLACE")
if language:
args += ["LANGUAGE", language]
if conn is not None:
return conn.execute_command(*args)
return self.execute_command(*args)
@deprecated_function(
version="2.0.0", reason="deprecated since redisearch 2.0, call hset instead"
)
def add_document(
self,
doc_id,
nosave=False,
score=1.0,
payload=None,
replace=False,
partial=False,
language=None,
no_create=False,
**fields,
):
"""
Add a single document to the index.
### Parameters
- **doc_id**: the id of the saved document.
- **nosave**: if set to true, we just index the document, and don't
save a copy of it. This means that searches will just
return ids.
- **score**: the document ranking, between 0.0 and 1.0
- **payload**: optional inner-index payload we can save for fast
i access in scoring functions
- **replace**: if True, and the document already is in the index,
we perform an update and reindex the document
- **partial**: if True, the fields specified will be added to the
existing document.
This has the added benefit that any fields specified
with `no_index`
will not be reindexed again. Implies `replace`
- **language**: Specify the language used for document tokenization.
- **no_create**: if True, the document is only updated and reindexed
if it already exists.
If the document does not exist, an error will be
returned. Implies `replace`
- **fields** kwargs dictionary of the document fields to be saved
and/or indexed.
NOTE: Geo points shoule be encoded as strings of "lon,lat"
""" # noqa
return self._add_document(
doc_id,
conn=None,
nosave=nosave,
score=score,
payload=payload,
replace=replace,
partial=partial,
language=language,
no_create=no_create,
**fields,
)
@deprecated_function(
version="2.0.0", reason="deprecated since redisearch 2.0, call hset instead"
)
def add_document_hash(self, doc_id, score=1.0, language=None, replace=False):
"""
Add a hash document to the index.
### Parameters
- **doc_id**: the document's id. This has to be an existing HASH key
in Redis that will hold the fields the index needs.
- **score**: the document ranking, between 0.0 and 1.0
- **replace**: if True, and the document already is in the index, we
perform an update and reindex the document
- **language**: Specify the language used for document tokenization.
""" # noqa
return self._add_document_hash(
doc_id, conn=None, score=score, language=language, replace=replace
)
def delete_document(self, doc_id, conn=None, delete_actual_document=False):
"""
Delete a document from index
Returns 1 if the document was deleted, 0 if not
### Parameters
- **delete_actual_document**: if set to True, RediSearch also delete
the actual document if it is in the index
""" # noqa
args = [DEL_CMD, self.index_name, doc_id]
if delete_actual_document:
args.append("DD")
if conn is not None:
return conn.execute_command(*args)
return self.execute_command(*args)
def load_document(self, id):
"""
Load a single document by id
"""
fields = self.client.hgetall(id)
f2 = {to_string(k): to_string(v) for k, v in fields.items()}
fields = f2
try:
del fields["id"]
except KeyError:
pass
return Document(id=id, **fields)
def get(self, *ids):
"""
Returns the full contents of multiple documents.
### Parameters
- **ids**: the ids of the saved documents.
"""
return self.execute_command(MGET_CMD, self.index_name, *ids)
def info(self):
"""
Get info an stats about the the current index, including the number of
documents, memory consumption, etc
For more information see `FT.INFO <https://redis.io/commands/ft.info>`_.
"""
res = self.execute_command(INFO_CMD, self.index_name)
it = map(to_string, res)
return dict(zip(it, it))
def get_params_args(
self, query_params: Union[Dict[str, Union[str, int, float]], None]
):
if query_params is None:
return []
args = []
if len(query_params) > 0:
args.append("params")
args.append(len(query_params) * 2)
for key, value in query_params.items():
args.append(key)
args.append(value)
return args
def _mk_query_args(self, query, query_params: Dict[str, Union[str, int, float]]):
args = [self.index_name]
if isinstance(query, str):
# convert the query from a text to a query object
query = Query(query)
if not isinstance(query, Query):
raise ValueError(f"Bad query type {type(query)}")
args += query.get_args()
args += self.get_params_args(query_params)
return args, query
def search(
self,
query: Union[str, Query],
query_params: Dict[str, Union[str, int, float]] = None,
):
"""
Search the index for a given query, and return a result of documents
### Parameters
- **query**: the search query. Either a text for simple queries with
default parameters, or a Query object for complex queries.
See RediSearch's documentation on query format
For more information see `FT.SEARCH <https://redis.io/commands/ft.search>`_.
""" # noqa
args, query = self._mk_query_args(query, query_params=query_params)
st = time.time()
res = self.execute_command(SEARCH_CMD, *args)
if isinstance(res, Pipeline):
return res
return Result(
res,
not query._no_content,
duration=(time.time() - st) * 1000.0,
has_payload=query._with_payloads,
with_scores=query._with_scores,
)
def explain(
self,
query: Union[str, Query],
query_params: Dict[str, Union[str, int, float]] = None,
):
"""Returns the execution plan for a complex query.
For more information see `FT.EXPLAIN <https://redis.io/commands/ft.explain>`_.
""" # noqa
args, query_text = self._mk_query_args(query, query_params=query_params)
return self.execute_command(EXPLAIN_CMD, *args)
def explain_cli(self, query: Union[str, Query]): # noqa
raise NotImplementedError("EXPLAINCLI will not be implemented.")
def aggregate(
self,
query: Union[str, Query],
query_params: Dict[str, Union[str, int, float]] = None,
):
"""
Issue an aggregation query.
### Parameters
**query**: This can be either an `AggregateRequest`, or a `Cursor`
An `AggregateResult` object is returned. You can access the rows from
its `rows` property, which will always yield the rows of the result.
For more information see `FT.AGGREGATE <https://redis.io/commands/ft.aggregate>`_.
""" # noqa
if isinstance(query, AggregateRequest):
has_cursor = bool(query._cursor)
cmd = [AGGREGATE_CMD, self.index_name] + query.build_args()
elif isinstance(query, Cursor):
has_cursor = True
cmd = [CURSOR_CMD, "READ", self.index_name] + query.build_args()
else:
raise ValueError("Bad query", query)
cmd += self.get_params_args(query_params)
raw = self.execute_command(*cmd)
return self._get_aggregate_result(raw, query, has_cursor)
def _get_aggregate_result(self, raw, query, has_cursor):
if has_cursor:
if isinstance(query, Cursor):
query.cid = raw[1]
cursor = query
else:
cursor = Cursor(raw[1])
raw = raw[0]
else:
cursor = None
if isinstance(query, AggregateRequest) and query._with_schema:
schema = raw[0]
rows = raw[2:]
else:
schema = None
rows = raw[1:]
return AggregateResult(rows, cursor, schema)
def profile(
self,
query: Union[str, Query, AggregateRequest],
limited: bool = False,
query_params: Optional[Dict[str, Union[str, int, float]]] = None,
):
"""
Performs a search or aggregate command and collects performance
information.
### Parameters
**query**: This can be either an `AggregateRequest`, `Query` or string.
**limited**: If set to True, removes details of reader iterator.
**query_params**: Define one or more value parameters.
Each parameter has a name and a value.
"""
st = time.time()
cmd = [PROFILE_CMD, self.index_name, ""]
if limited:
cmd.append("LIMITED")
cmd.append("QUERY")
if isinstance(query, AggregateRequest):
cmd[2] = "AGGREGATE"
cmd += query.build_args()
elif isinstance(query, Query):
cmd[2] = "SEARCH"
cmd += query.get_args()
cmd += self.get_params_args(query_params)
else:
raise ValueError("Must provide AggregateRequest object or " "Query object.")
res = self.execute_command(*cmd)
if isinstance(query, AggregateRequest):
result = self._get_aggregate_result(res[0], query, query._cursor)
else:
result = Result(
res[0],
not query._no_content,
duration=(time.time() - st) * 1000.0,
has_payload=query._with_payloads,
with_scores=query._with_scores,
)
return result, parse_to_dict(res[1])
def spellcheck(self, query, distance=None, include=None, exclude=None):
"""
Issue a spellcheck query
### Parameters
**query**: search query.
**distance***: the maximal Levenshtein distance for spelling
suggestions (default: 1, max: 4).
**include**: specifies an inclusion custom dictionary.
**exclude**: specifies an exclusion custom dictionary.
For more information see `FT.SPELLCHECK <https://redis.io/commands/ft.spellcheck>`_.
""" # noqa
cmd = [SPELLCHECK_CMD, self.index_name, query]
if distance:
cmd.extend(["DISTANCE", distance])
if include:
cmd.extend(["TERMS", "INCLUDE", include])
if exclude:
cmd.extend(["TERMS", "EXCLUDE", exclude])
raw = self.execute_command(*cmd)
corrections = {}
if raw == 0:
return corrections
for _correction in raw:
if isinstance(_correction, int) and _correction == 0:
continue
if len(_correction) != 3:
continue
if not _correction[2]:
continue
if not _correction[2][0]:
continue
# For spellcheck output
# 1) 1) "TERM"
# 2) "{term1}"
# 3) 1) 1) "{score1}"
# 2) "{suggestion1}"
# 2) 1) "{score2}"
# 2) "{suggestion2}"
#
# Following dictionary will be made
# corrections = {
# '{term1}': [
# {'score': '{score1}', 'suggestion': '{suggestion1}'},
# {'score': '{score2}', 'suggestion': '{suggestion2}'}
# ]
# }
corrections[_correction[1]] = [
{"score": _item[0], "suggestion": _item[1]} for _item in _correction[2]
]
return corrections
def dict_add(self, name, *terms):
"""Adds terms to a dictionary.
### Parameters
- **name**: Dictionary name.
- **terms**: List of items for adding to the dictionary.
For more information see `FT.DICTADD <https://redis.io/commands/ft.dictadd>`_.
""" # noqa
cmd = [DICT_ADD_CMD, name]
cmd.extend(terms)
return self.execute_command(*cmd)
def dict_del(self, name, *terms):
"""Deletes terms from a dictionary.
### Parameters
- **name**: Dictionary name.
- **terms**: List of items for removing from the dictionary.
For more information see `FT.DICTDEL <https://redis.io/commands/ft.dictdel>`_.
""" # noqa
cmd = [DICT_DEL_CMD, name]
cmd.extend(terms)
return self.execute_command(*cmd)
def dict_dump(self, name):
"""Dumps all terms in the given dictionary.
### Parameters
- **name**: Dictionary name.
For more information see `FT.DICTDUMP <https://redis.io/commands/ft.dictdump>`_.
""" # noqa
cmd = [DICT_DUMP_CMD, name]
return self.execute_command(*cmd)
def config_set(self, option, value):
"""Set runtime configuration option.
### Parameters
- **option**: the name of the configuration option.
- **value**: a value for the configuration option.
For more information see `FT.CONFIG SET <https://redis.io/commands/ft.config-set>`_.
""" # noqa
cmd = [CONFIG_CMD, "SET", option, value]
raw = self.execute_command(*cmd)
return raw == "OK"
def config_get(self, option):
"""Get runtime configuration option value.
### Parameters
- **option**: the name of the configuration option.
For more information see `FT.CONFIG GET <https://redis.io/commands/ft.config-get>`_.
""" # noqa
cmd = [CONFIG_CMD, "GET", option]
res = {}
raw = self.execute_command(*cmd)
if raw:
for kvs in raw:
res[kvs[0]] = kvs[1]
return res
def tagvals(self, tagfield):
"""
Return a list of all possible tag values
### Parameters
- **tagfield**: Tag field name
For more information see `FT.TAGVALS <https://redis.io/commands/ft.tagvals>`_.
""" # noqa
return self.execute_command(TAGVALS_CMD, self.index_name, tagfield)
def aliasadd(self, alias):
"""
Alias a search index - will fail if alias already exists
### Parameters
- **alias**: Name of the alias to create
For more information see `FT.ALIASADD <https://redis.io/commands/ft.aliasadd>`_.
""" # noqa
return self.execute_command(ALIAS_ADD_CMD, alias, self.index_name)
def aliasupdate(self, alias):
"""
Updates an alias - will fail if alias does not already exist
### Parameters
- **alias**: Name of the alias to create
For more information see `FT.ALIASUPDATE <https://redis.io/commands/ft.aliasupdate>`_.
""" # noqa
return self.execute_command(ALIAS_UPDATE_CMD, alias, self.index_name)
def aliasdel(self, alias):
"""
Removes an alias to a search index
### Parameters
- **alias**: Name of the alias to delete
For more information see `FT.ALIASDEL <https://redis.io/commands/ft.aliasdel>`_.
""" # noqa
return self.execute_command(ALIAS_DEL_CMD, alias)
def sugadd(self, key, *suggestions, **kwargs):
"""
Add suggestion terms to the AutoCompleter engine. Each suggestion has
a score and string.
If kwargs["increment"] is true and the terms are already in the
server's dictionary, we increment their scores.
For more information see `FT.SUGADD <https://redis.io/commands/ft.sugadd/>`_.
""" # noqa
# If Transaction is not False it will MULTI/EXEC which will error
pipe = self.pipeline(transaction=False)
for sug in suggestions:
args = [SUGADD_COMMAND, key, sug.string, sug.score]
if kwargs.get("increment"):
args.append("INCR")
if sug.payload:
args.append("PAYLOAD")
args.append(sug.payload)
pipe.execute_command(*args)
return pipe.execute()[-1]
def suglen(self, key):
"""
Return the number of entries in the AutoCompleter index.
For more information see `FT.SUGLEN <https://redis.io/commands/ft.suglen>`_.
""" # noqa
return self.execute_command(SUGLEN_COMMAND, key)
def sugdel(self, key, string):
"""
Delete a string from the AutoCompleter index.
Returns 1 if the string was found and deleted, 0 otherwise.
For more information see `FT.SUGDEL <https://redis.io/commands/ft.sugdel>`_.
""" # noqa
return self.execute_command(SUGDEL_COMMAND, key, string)
def sugget(
self, key, prefix, fuzzy=False, num=10, with_scores=False, with_payloads=False
):
"""
Get a list of suggestions from the AutoCompleter, for a given prefix.
Parameters:
prefix : str
The prefix we are searching. **Must be valid ascii or utf-8**
fuzzy : bool
If set to true, the prefix search is done in fuzzy mode.
**NOTE**: Running fuzzy searches on short (<3 letters) prefixes
can be very
slow, and even scan the entire index.
with_scores : bool
If set to true, we also return the (refactored) score of
each suggestion.
This is normally not needed, and is NOT the original score
inserted into the index.
with_payloads : bool
Return suggestion payloads
num : int
The maximum number of results we return. Note that we might
return less. The algorithm trims irrelevant suggestions.
Returns:
list:
A list of Suggestion objects. If with_scores was False, the
score of all suggestions is 1.
For more information see `FT.SUGGET <https://redis.io/commands/ft.sugget>`_.
""" # noqa
args = [SUGGET_COMMAND, key, prefix, "MAX", num]
if fuzzy:
args.append(FUZZY)
if with_scores:
args.append(WITHSCORES)
if with_payloads:
args.append(WITHPAYLOADS)
ret = self.execute_command(*args)
results = []
if not ret:
return results
parser = SuggestionParser(with_scores, with_payloads, ret)
return [s for s in parser]
def synupdate(self, groupid, skipinitial=False, *terms):
"""
Updates a synonym group.
The command is used to create or update a synonym group with
additional terms.
Only documents which were indexed after the update will be affected.
Parameters:
groupid :
Synonym group id.
skipinitial : bool
If set to true, we do not scan and index.
terms :
The terms.
For more information see `FT.SYNUPDATE <https://redis.io/commands/ft.synupdate>`_.
""" # noqa
cmd = [SYNUPDATE_CMD, self.index_name, groupid]
if skipinitial:
cmd.extend(["SKIPINITIALSCAN"])
cmd.extend(terms)
return self.execute_command(*cmd)
def syndump(self):
"""
Dumps the contents of a synonym group.
The command is used to dump the synonyms data structure.
Returns a list of synonym terms and their synonym group ids.
For more information see `FT.SYNDUMP <https://redis.io/commands/ft.syndump>`_.
""" # noqa
raw = self.execute_command(SYNDUMP_CMD, self.index_name)
return {raw[i]: raw[i + 1] for i in range(0, len(raw), 2)}
class AsyncSearchCommands(SearchCommands):
async def info(self):
"""
Get info an stats about the the current index, including the number of
documents, memory consumption, etc
For more information see `FT.INFO <https://redis.io/commands/ft.info>`_.
"""
res = await self.execute_command(INFO_CMD, self.index_name)
it = map(to_string, res)
return dict(zip(it, it))
async def search(
self,
query: Union[str, Query],
query_params: Dict[str, Union[str, int, float]] = None,
):
"""
Search the index for a given query, and return a result of documents
### Parameters
- **query**: the search query. Either a text for simple queries with
default parameters, or a Query object for complex queries.
See RediSearch's documentation on query format
For more information see `FT.SEARCH <https://redis.io/commands/ft.search>`_.
""" # noqa
args, query = self._mk_query_args(query, query_params=query_params)
st = time.time()
res = await self.execute_command(SEARCH_CMD, *args)
if isinstance(res, Pipeline):
return res
return Result(
res,
not query._no_content,
duration=(time.time() - st) * 1000.0,
has_payload=query._with_payloads,
with_scores=query._with_scores,
)
async def aggregate(
self,
query: Union[str, Query],
query_params: Dict[str, Union[str, int, float]] = None,
):
"""
Issue an aggregation query.
### Parameters
**query**: This can be either an `AggregateRequest`, or a `Cursor`
An `AggregateResult` object is returned. You can access the rows from
its `rows` property, which will always yield the rows of the result.
For more information see `FT.AGGREGATE <https://redis.io/commands/ft.aggregate>`_.
""" # noqa
if isinstance(query, AggregateRequest):
has_cursor = bool(query._cursor)
cmd = [AGGREGATE_CMD, self.index_name] + query.build_args()
elif isinstance(query, Cursor):
has_cursor = True
cmd = [CURSOR_CMD, "READ", self.index_name] + query.build_args()
else:
raise ValueError("Bad query", query)
cmd += self.get_params_args(query_params)
raw = await self.execute_command(*cmd)
return self._get_aggregate_result(raw, query, has_cursor)
async def spellcheck(self, query, distance=None, include=None, exclude=None):
"""
Issue a spellcheck query
### Parameters
**query**: search query.
**distance***: the maximal Levenshtein distance for spelling
suggestions (default: 1, max: 4).
**include**: specifies an inclusion custom dictionary.
**exclude**: specifies an exclusion custom dictionary.
For more information see `FT.SPELLCHECK <https://redis.io/commands/ft.spellcheck>`_.
""" # noqa
cmd = [SPELLCHECK_CMD, self.index_name, query]
if distance:
cmd.extend(["DISTANCE", distance])
if include:
cmd.extend(["TERMS", "INCLUDE", include])
if exclude:
cmd.extend(["TERMS", "EXCLUDE", exclude])
raw = await self.execute_command(*cmd)
corrections = {}
if raw == 0:
return corrections
for _correction in raw:
if isinstance(_correction, int) and _correction == 0:
continue
if len(_correction) != 3:
continue
if not _correction[2]:
continue
if not _correction[2][0]:
continue
corrections[_correction[1]] = [
{"score": _item[0], "suggestion": _item[1]} for _item in _correction[2]
]
return corrections
async def config_set(self, option, value):
"""Set runtime configuration option.
### Parameters
- **option**: the name of the configuration option.
- **value**: a value for the configuration option.
For more information see `FT.CONFIG SET <https://redis.io/commands/ft.config-set>`_.
""" # noqa
cmd = [CONFIG_CMD, "SET", option, value]
raw = await self.execute_command(*cmd)
return raw == "OK"
async def config_get(self, option):
"""Get runtime configuration option value.
### Parameters
- **option**: the name of the configuration option.
For more information see `FT.CONFIG GET <https://redis.io/commands/ft.config-get>`_.
""" # noqa
cmd = [CONFIG_CMD, "GET", option]
res = {}
raw = await self.execute_command(*cmd)
if raw:
for kvs in raw:
res[kvs[0]] = kvs[1]
return res
async def load_document(self, id):
"""
Load a single document by id
"""
fields = await self.client.hgetall(id)
f2 = {to_string(k): to_string(v) for k, v in fields.items()}
fields = f2
try:
del fields["id"]
except KeyError:
pass
return Document(id=id, **fields)
async def sugadd(self, key, *suggestions, **kwargs):
"""
Add suggestion terms to the AutoCompleter engine. Each suggestion has
a score and string.
If kwargs["increment"] is true and the terms are already in the
server's dictionary, we increment their scores.
For more information see `FT.SUGADD <https://redis.io/commands/ft.sugadd>`_.
""" # noqa
# If Transaction is not False it will MULTI/EXEC which will error
pipe = self.pipeline(transaction=False)
for sug in suggestions:
args = [SUGADD_COMMAND, key, sug.string, sug.score]
if kwargs.get("increment"):
args.append("INCR")
if sug.payload:
args.append("PAYLOAD")
args.append(sug.payload)
pipe.execute_command(*args)
return (await pipe.execute())[-1]
async def sugget(
self, key, prefix, fuzzy=False, num=10, with_scores=False, with_payloads=False
):
"""
Get a list of suggestions from the AutoCompleter, for a given prefix.
Parameters:
prefix : str
The prefix we are searching. **Must be valid ascii or utf-8**
fuzzy : bool
If set to true, the prefix search is done in fuzzy mode.
**NOTE**: Running fuzzy searches on short (<3 letters) prefixes
can be very
slow, and even scan the entire index.
with_scores : bool
If set to true, we also return the (refactored) score of
each suggestion.
This is normally not needed, and is NOT the original score
inserted into the index.
with_payloads : bool
Return suggestion payloads
num : int
The maximum number of results we return. Note that we might
return less. The algorithm trims irrelevant suggestions.
Returns:
list:
A list of Suggestion objects. If with_scores was False, the
score of all suggestions is 1.
For more information see `FT.SUGGET <https://redis.io/commands/ft.sugget>`_.
""" # noqa
args = [SUGGET_COMMAND, key, prefix, "MAX", num]
if fuzzy:
args.append(FUZZY)
if with_scores:
args.append(WITHSCORES)
if with_payloads:
args.append(WITHPAYLOADS)
ret = await self.execute_command(*args)
results = []
if not ret:
return results
parser = SuggestionParser(with_scores, with_payloads, ret)
return [s for s in parser]
| [
"alex.mylnikov@hitachivantara.com"
] | alex.mylnikov@hitachivantara.com |
e3bed94cd6c673192d0065f770c84ddcc55c6d0f | 1287ad54942fd2020a217ab12004a541abb62558 | /pythonexercicios/Ex069.py | 3e115b129e95eaefc77d1cfc0493628117d9168a | [] | no_license | LuPessoa/exerciciospy- | 637f24581722e547a62380973ca645b55ff65d90 | b5faad818f978bb13a65922edceb17888b73a407 | refs/heads/master | 2023-05-12T04:16:39.847184 | 2021-06-04T03:02:24 | 2021-06-04T03:02:24 | 374,410,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | tot18 = toth = totm20 = 0
while True:
idade = int(input('Idade: '))
sexo = ' '
while sexo not in 'MF':
sexo = str(input('Sexo:[M/F]')).strip().upper()[0]
if idade >= 18:
tot18 += 1
if sexo == 'M':
toth += 1
if sexo == 'F'and idade < 20:
totm20 += 1
resp = ' '
while resp not in 'SN':
resp = str(input('Quer continuar? [S/N] ')).strip().upper()[0]
if resp == 'N':
break
print(f'Total de pessoas com mais de 18 anos : {tot18}')
print(f'Ao todo temos {toth} homens cadastrados')
print(f'E temos {totm20} mulheres com menos de 20 anos')
| [
"lulenemacedo29@gmail.com"
] | lulenemacedo29@gmail.com |
86e3018dfa70529dc368c2fd8577eea8b8c9b37b | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Reconstruction/RecExample/RecExCommission/share/RecExCommissionCommonFlags_jobOptions.py | 28d3249054bcee974759a24c6adda8d0d21b089f | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,532 | py | # ---------------------------------------------------------------------------
# RecExCommon flags
# ---------------------------------------------------------------------------
# NOTE: this is an ATLAS Athena jobOptions fragment; names such as
# ``include`` and ``INFO`` are injected by the Athena framework at exec time.
include.block("RecExCommission/RecExCommissionCommonFlags_jobOptions.py")
# ---------------------------------------------------------------------------
# AthenaCommonFlags
# ---------------------------------------------------------------------------
# start using the new job properties
from AthenaCommon.JobProperties import jobproperties
# AthenaCommon flags
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
# -----------------------------------------------------------------------
# being general reconstruction flags
# -----------------------------------------------------------------------
from RecExConfig.RecFlags import jobproperties, rec
#Number of events to process or generate
athenaCommonFlags.EvtMax=10
# Number of events to skip when reading an input POOL file.
athenaCommonFlags.SkipEvents = 0
# The list of input POOL files containing collections of Raw data objects
#  ['myfile.pool.root'] : file in run directory
#  ['LFN:mylogicalfile.root']: logical file name as specified in PoolFileCatalog.cml
#  ['rfio:/castor/cern.ch/somepath/somefile.pool.root']:file on castor (at CERN)
athenaCommonFlags.PoolRDOInput = ["/afs/cern.ch/atlas/maxidisk/d17/data/TileTest/dig.05AprProd-10000000.root"]
# The list of input ByteStream files containing collections of Raw data objects
#athenaCommonFlags.BSRDOInput = ["/castor/cern.ch/grid/atlas/t0/perm/DAQ/daq.m4_combined.0019783.Default.L1TT-b00000001.LB0001.SFO-1._0001.data"]
athenaCommonFlags.BSRDOInput = ["/castor/cern.ch/grid/atlas/t0/perm/DAQ/daq.m4_combined.0020720.debug.L1TT-b00000001.LB0000.SFO-1._0001.data"]
#athenaCommonFlags.BSRDOInput = ["/castor/cern.ch/grid/atlas/DAQ/M6/daq.NoTag.0043719.physics.HLT_Cosmic_AllTeIDSelected.LB0000.SFO-1._0001.data"]
# The list of input POOL files containing collections of ESD objects
athenaCommonFlags.PoolESDInput = ["castor:/castor/cern.ch/grid/atlas/t0/perm/M4reproc/0020720/FESD/M4.0020720.physics.L1TT-b00000010.FESD.v130026.part0001._lumi0002._0001.1"]
# The name of the output POOL file containing collections of ESD objects
athenaCommonFlags.PoolESDOutput = "ESD.root"
# The list of input POOL files containing collections of TAGs
#athenaCommonFlags.PoolTAGInput = "TAG.root"
# The name of the output POOL file containing collections of TAGs
athenaCommonFlags.PoolTAGOutput = "TAG.root"
rec.PoolTAGCOMOutput="TAGCOM.root"
athenaCommonFlags.PoolInputQuery = "TRT_Cosmic_Tracks"
#AllowIgnoreExistingDataObject
#AllowIgnoreConfigError
#athenaCommonFlags.AthenaCommonFlags
# -----------------------------------------------------------------------
# GlobalFlags
# -----------------------------------------------------------------------
# GlobalFlags
from AthenaCommon.GlobalFlags import globalflags
# Which detector configuration : atlas, combined test beam or commisisoning
# Commissioning: 'commis'
globalflags.DetGeo = 'commis'
# Detector geometry DB tag
globalflags.DetDescrVersion="ATLAS-CommNF-04-00-00"
# data source: Where does the data comes from : real data, geant3 or geant4
# real data: data
# simulation: 'geant4'
globalflags.DataSource = 'data'
# Input format:
# to read from BS: 'bytestream'
# to read from ESD, RDO Pool files = 'pool'
globalflags.InputFormat = 'bytestream'
# ---------------------------------------------------------------------------
# Beam flags to define the
# ---------------------------------------------------------------------------
# Type of data to reconstruct: 'singlebeam','cosmics'
from AthenaCommon.BeamFlags import jobproperties
#jobproperties.Beam.beamType.set_Value_and_Lock("cosmics")
# ---------------------------------------------------------------------------
# BField flags to define the
# ---------------------------------------------------------------------------
# Field configuration: solenoidOn() barrelToroidOn() endcapToroidOn()
# All magnets off for this commissioning configuration.
from AthenaCommon.BFieldFlags import jobproperties
jobproperties.BField.solenoidOn=False
jobproperties.BField.barrelToroidOn=False
jobproperties.BField.endcapToroidOn=False
# -----------------------------------------------------------------------
# flags to drive the general behaviour of Reconstruction configuration
# -----------------------------------------------------------------------
#from RecExConfig.RecConfFlags import recConfFlags
#RecConfFlags.AllowBackNavigation
#RecConfFlags.AllowDisable
#RecConfFlags.AllowIgnoreConfigError
#RecConfFlags.AllowIgnoreExistingDataObject
#RecConfFlags.RecConfFlags
rec.CBNTAthenaAware = True
rec.doAOD = False
#rec.doAODall
#rec.doAODCaloCells
rec.doCBNT = True
#rec.doCheckDictionary
#rec.doCheckJOT
#rec.doDetailedAuditor
#rec.doDumpMC
#rec.doDumpPoolInputContent
#rec.doDumpProperties
#rec.doDumpTDS
#rec.doDumpTES
#rec.doEdmMonitor
#rec.doESD = True
# rec.doFileMetaData TODO might replace doDetStatus???
rec.doDetStatus = True
#rec.doFloatingPointException
#rec.doHeavyIon
rec.doHist = True
rec.doJiveXML = False
#rec.doLowPt
#rec.doMinimalRec
#rec.doNameAuditor
#rec.doPerfMon = False
rec.doPersint = False
#rec.doRestrictedESD
#rec.doSGAuditor
#rec.doShowSizeStatistics
#rec.doTimeLimit
#rec.doTruth
rec.doWriteAOD = False
#rec.doWriteBS
# If True writes out ESD file
rec.doWriteESD = True
#rec.doWriteRDO
# If True writes out TAG file
rec.doWriteTAG = True
#rec.noESDTrigger
#rec.oldFlagCompatibility
#rec.oldFlagLandMine
#rec.oldFlagTopSteering
# General msg output level ALL,VERBOSE,DEBUG,INFO,WARNING,ERROR,FATAL
# NOTE: INFO is an Athena-provided global, available when this fragment runs.
rec.OutputLevel = INFO
#rec.readAOD
#If True runs on ESD file
rec.readESD = False
#rec.readRDO
#rec.readTAG
#rec.Rec
#rec.RecAlgs
rec.RootHistoOutput = "monitoring.root"
rec.RootNtupleOutput = "ntuple.root"
#rec.TAGFromRDO
#rec.UserAlgs
# Sub-detector reconstruction switches: all four systems enabled.
rec.doTile = True
rec.doLArg = True
rec.doInDet = True
rec.doMuon = True
## Switch on/off Calibration Ntuple
#from MuonRecExample.MuonRecFlags import muonRecFlags
#from MuonCalibAlgs.MuonCalibFlags import muonCalibFlags
#muonRecFlags.doCalib = True
#muonCalibFlags.Mode = 'trackNtuple'
#muonCalibFlags.EventTag = 'Moore'
### Switch on/off Combined Algorithms
from MuonCombinedRecExample.MuonCombinedRecFlags import muonCombinedRecFlags,muidFlags
muonCombinedRecFlags.doMuGirl = False
muonCombinedRecFlags.doCaloTrkMuId = True
muonCombinedRecFlags.doStaco = False
muonCombinedRecFlags.doMuTag = False
muonCombinedRecFlags.doAODMuons = True  # switch off AOD making
muonCombinedRecFlags.doMergeMuons = True  # switch off merging for ESD
muidFlags.SegmentTagger = 'MuTagIMO'  # switch off by ''
#muidFlags.Extrapolated = 'MuidStandalone'  # switch off by ''
muidFlags.Extrapolated = ''  # switch off by ''
#muidFlags.Combined = 'MuidCombined' # NOW not run  # switch off by ''
muidFlags.Combined = 'CombinedMuonFit'  # switch off by ''
# ----------------------------------------------------------------------
# being flags to switch algorithm on/off
# ----------------------------------------------------------------------
from RecExConfig.RecAlgsFlags import recAlgs
#recAlgs.doTrigger
#
# hack...
#
from RecExConfig.RecFlags import rec
if not 'InDetKeys' in dir():
    #
    # --- setup StoreGate keys (JobProperties!)
    #
    # NOTE: Python-2 print statement — this fragment predates Python 3.
    print "InDetRec_jobOptions: InDetKeys not set before - I import them now"
    from InDetRecExample.InDetKeys import InDetKeys
# InDetKeys.lock_JobProperties()
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
912f205c750470d91a847ee210f182d2c0cb001a | 40ac1c3f3dc024e2cdb5e7939abf408cde1b59ee | /webscraper/application/airquality/app_air_quality_scraper.py | 71cc8dafa1476eb9dbe385cb727aab56a7da302c | [] | no_license | plutoese/webscraper | 5319fbdcd2baf7392b2f9fb623eddef8f9c0bbcf | f360a10e0e6da2c250a2c7e5c64ceb74e6919ac6 | refs/heads/master | 2020-04-15T12:43:23.740908 | 2017-12-24T14:22:07 | 2017-12-24T14:22:07 | 61,554,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # coding=UTF-8
# --------------------------------------------------------------
# application_air_quality module
# @introduction: scrape air-quality data
# @source: tianqihoubao.com, http://www.tianqihoubao.com/aqi/
# @dependency: requests, bs4 and re packages
# @author: plutoese
# @date: 2016.06.26
# --------------------------------------------------------------
import sys
from libs.class_mongodb import MongoDB
from libs.class_proxymanager import ProxyManager
from libs.class_staticsitescraper import StaticSiteScraper
# 1. initialise parameters
# 1.1 configure the proxy server (note: 'ramdomproxy' is a historical typo,
# kept because renaming is out of scope for a comment-only pass)
pmanager = ProxyManager()
ramdomproxy = pmanager.random_proxy
# raise the recursion limit (the scraper recurses while following links)
sys.setrecursionlimit(1000000)
# 1.2 configure the site scraper: seed it with pages already cached in Mongo
# so previously visited URLs are skipped
db = MongoDB()
db.connect('cache','scraper')
pages = [item['webaddress'] for item in db.collection.find({'label':'airquality'},projection={'_id':0,'webaddress':1})]
site_scraper = StaticSiteScraper('http://www.tianqihoubao.com/aqi/',
                                 label='airquality',
                                 proxy=ramdomproxy,
                                 pages=set(pages))
# 2. start scraping: follow links matching the /aqi/<letters> pattern
site_scraper.get_links(page_url='',condition='/aqi/[a-zA-Z]+',cache=True)
"glen.zhang7@gmail.com"
] | glen.zhang7@gmail.com |
8de8074878d9a6ef66cf4b622dd96bda62582f72 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_ConstantTrend_NoCycle_MLP.py | 8dc04c8230edaa12adfba6c5a152dc044fc81b45 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 161 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
# Build the Ozone test model with a single enabled combination:
# Integration transform, constant trend, no cycle, MLP autoregressor.
testmod.build_model( ['Integration'] , ['ConstantTrend'] , ['NoCycle'] , ['MLP'] );
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
c02bc04d08e242df89b646f0c2694bc21b9c6238 | a1c6fea0703d7d813a88aae91a7fbb17e06785ea | /seller_app/serializers.py | 49eceb707a8ae0b33dcd8f72f96ac9dadaa7de4c | [] | no_license | warm200/SpokesTribe | bea676b2868272ceab17176d7eb5d98ae7747543 | 8c3671214e317987645aeef4451e590bcb772f7e | refs/heads/master | 2022-01-11T18:12:40.847007 | 2019-02-08T13:08:38 | 2019-02-08T13:08:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,611 | py | # coding:utf-8
from django.utils import timezone
from drf_extra_fields.fields import Base64ImageField
from rest_framework import serializers
import decimal
import re
import datetime
from MyAbstract.exceptions import ValidationDict211Error
from MyAbstract.fields import CompressBase64ImageField
from MyAbstract.funtions import timetuple, decimal2string
from MyAbstract.serializers import MyModelSerializer
from common.models import MyUser, Shop, ShopPhoto, ShopActivity, ShopDiscount, ShopWallet, ShopBusinessLicence, \
ShopLicence, ShopRequire, ShopCombo, ShopComboGoods, TradeDiscountProfile, TradeTicketProfile, SmsVerCode, \
ShopMemberCard, CardDiscount, ShopMember, RandomNickImage, ShopMemberRecharge, ShopMemberService, \
ShopMemberRechargeTime, ShopMemberRechargeCount, ShopSpoke, FriendGroup, ShopSpokeGroup, TradeMemberProfile, \
TradeShop, TradePay, ShopSpokeRequest, ShopSpokeRequestHistory, CashRecord, ShopPayZSProfile, ShopFlyer, \
ShopFlyerDiscountProfile, ShopFlyerReduceProfile, ShopFlyerExperienceProfile, ShopFlyerExperienceGoods, \
Flyer2Shop, TradeExperienceProfile, TradeRecord, ShopMemberDelSnap, ShopManagerShip, MyUserSellerSettingProfile
from common.function import register_profile
from common.serializers import ShopLicenceSerializer, ShopPhotoSerializer, ShopBusinessLicenceSerializer, \
ShopComboGoodsSerializer, CardDiscountSerializer, AbstractCashRecordListSerializer, AbstractCashRecordSerializer
from APNS import apns_push
class ShopDiscountSerializer(serializers.ModelSerializer):
    """Plain read/write serializer for a shop's discount settings."""
    class Meta:
        model = ShopDiscount
        fields = ('discount', 'is_valid', 'full_price', 'reduce_price', 'type')
class ShopListAppSerializer(serializers.ModelSerializer):
    """Compact shop row for the seller app's shop list."""
    # Serve the pre-generated thumbnail instead of the full icon.
    ico = serializers.ImageField(source='ico_thumbnail')
    # Human-readable state label (Django's get_FOO_display).
    state = serializers.CharField(source='get_state_display')
    is_seller = serializers.SerializerMethodField()
    def get_is_seller(self, obj):
        # Whether the requesting user owns this shop; the view/queryset is
        # expected to have annotated ``is_seller`` on the instance.
        return obj.is_seller
    class Meta:
        model = Shop
        fields = ('id', 'name', 'ico', 'address', 'state', 'is_seller')
class ShopSerializer(MyModelSerializer):
    """Full shop detail serializer (seller side).

    Images are accepted as base64 payloads; nested licence serializers
    expose the shop's certification documents.  ``is_seller``,
    ``is_manager`` and ``have_pay_pw`` are expected to be annotated on the
    instance by the view (they are not model fields).
    """
    ico = CompressBase64ImageField()
    face = CompressBase64ImageField()
    business_licence = ShopBusinessLicenceSerializer()
    licences = ShopLicenceSerializer(many=True)
    activity = serializers.CharField()
    discount = serializers.CharField()
    type = serializers.CharField()
    state = serializers.CharField(source='get_state_display')
    is_seller = serializers.SerializerMethodField()
    is_manager = serializers.SerializerMethodField()
    have_pay_pw = serializers.SerializerMethodField()
    def get_is_seller(self, obj):
        return obj.is_seller
    def get_is_manager(self, obj):
        return obj.is_manager
    def get_have_pay_pw(self, obj):
        return obj.have_pay_pw
    class Meta:
        model = Shop
        fields = ('id', 'name', 'address', 'latitude', 'longitude', 'ico', 'face', 'level', 'activity',
                  'discount', 'business_licence', 'phone', 'type', 'seller', 'managers', 'describe', 'licences',
                  'open_time', 'close_time', 'convenience', 'state', 'is_seller', 'is_manager', 'have_pay_pw')
        read_only_fields = ('state', 'is_seller', 'is_manager', 'have_pay_pw', 'level', 'activity', 'discount',)
        # NOTE(review): ``custom_fields`` appears to be consumed by
        # MyModelSerializer — confirm its semantics there.
        custom_fields = ('is_seller', 'is_manager', 'have_pay_pw')
class ShopRequireSerializer(serializers.ModelSerializer):
    """The three free-form requirement fields a shop sets for spokesmen."""
    class Meta:
        model = ShopRequire
        fields = ('require1', 'require2', 'require3')
class ShopRequestJudgeSerializer(serializers.Serializer):
    """Input: accept (True) or reject (False) a pending request."""
    judge = serializers.BooleanField()
class WalletLooseChangeSerializer(serializers.ModelSerializer):
    """Exposes only the wallet's available balance, as a decimal string."""
    loose_change = serializers.SerializerMethodField()
    def get_loose_change(self, obj):
        # loose_change() is a model method; render via the shared
        # decimal-to-string helper for consistent formatting.
        return decimal2string(obj.loose_change())
    class Meta:
        model = ShopWallet
        fields = ('loose_change',)
        read_only_fields = ('loose_change',)
class WalletBonusSerializer(MyModelSerializer):
    """Read-only view of the shop wallet's bonus pool status."""
    bonus_pool = serializers.SerializerMethodField()
    warning_line = serializers.SerializerMethodField()
    has_card = serializers.SerializerMethodField()
    attention = serializers.SerializerMethodField()
    def get_bonus_pool(self, obj):
        return decimal2string(obj.bonus_pool)
    def get_warning_line(self, obj):
        # API name "warning_line" maps to the model's bonus_warning field.
        return decimal2string(obj.bonus_warning)
    def get_has_card(self, obj):
        # True when a bankcard relation exists on the wallet.
        return hasattr(obj, 'bankcard')
    def get_attention(self, obj):
        return obj.attention
    class Meta:
        model = ShopWallet
        fields = ('bonus_pool', 'warning_line', 'has_card', 'attention')
        read_only_fields = ('bonus_pool', 'warning_line', 'has_card', 'attention')
class TradeFilterSerializer(serializers.Serializer):
    """Date-range + optional pay-channel filter for trade queries."""
    begin_time = serializers.DateField()
    end_time = serializers.DateField()
    pay_type = serializers.ChoiceField(choices=['wx', 'ali'], required=False)
    def validate_end_time(self, value):
        # Make the range end-inclusive: push the exclusive upper bound to
        # the start of the following day.
        value += datetime.timedelta(days=1)
        return value
class TradeDiscountSerializer(MyModelSerializer):
    """Read-only rows for the seller's spokesman-discount trade list.

    Declared fields are pulled from the related ``trade``;
    ``to_representation`` adds timing, platform cost, pay channels and the
    "spoker" — either a shop that issued a flyer ticket, or an individual
    spokesman — plus the remaining member balance when part of the trade
    was paid from it.
    """
    trade_number = serializers.CharField(source='trade.trade_number')
    total_fee = serializers.CharField(source='trade.total_fee')
    discount = serializers.CharField(source='trade.discount')
    buyer_ico = serializers.ImageField(source='trade.buyer.ico_thumbnail')
    buyer_name = serializers.CharField(source='trade.buyer.nick_name')

    def to_representation(self, instance):
        representation = super(TradeDiscountSerializer, self).to_representation(instance)
        representation['trade_time'] = timetuple(instance.trade.trade_time)
        # BUG FIX: the original chained conditional expression parsed as
        #   a if a else ((0 + b) if b else 0)
        # so owner_earning was silently dropped whenever pay_platform_expend
        # was truthy.  The intent is the sum of both, treating None as 0.
        representation['pay_platform_expend'] = decimal2string(
            (instance.pay_platform_expend or 0) + (instance.owner_earning or 0))
        representation['pay_type'] = ','.join(instance.trade.pay_type(True))
        request = self.context.get('request', None)
        if instance.ticket:
            # Ticket present: the spoker is the shop that issued the flyer.
            representation['ticket_type'] = instance.ticket.flyer.get_type_display()
            representation['spoker_type'] = 'shop'
            url = instance.ticket.shop.ico_thumbnail.url
            representation['spoker_ico'] = request.build_absolute_uri(url) if request else url
            representation['spoker_name'] = instance.ticket.shop.name
            representation['brokerage'] = instance.ticket.flyer.bonus
        else:
            # No ticket: an individual spokesman referred the buyer.
            representation['spoker_type'] = 'spoker'
            url = instance.trade.spokesman.ico_thumbnail.url
            representation['spoker_ico'] = request.build_absolute_uri(url) if request else url
            representation['spoker_name'] = instance.trade.spokesman.nick_name
            representation['brokerage'] = instance.brokerage
        # Member-balance payment: expose the balance remaining afterwards.
        for item in instance.trade.tradepay_set.all():
            if 'member' == item.pay_type:
                representation['after'] = decimal2string(item.remain) if item.remain else '0'
                break
        return representation

    class Meta:
        model = TradeDiscountProfile
        fields = ('activity', 'trade_price', 'brokerage', 'status', 'trade_number', 'discount', 'buyer_ico', 'buyer_name', 'total_fee')
class TradeTicketSerializer(MyModelSerializer):
    """Read-only rows for combo-ticket trades (seller side)."""
    total_fee = serializers.CharField(source='combo.activity_price')
    discount = serializers.CharField(source='trade.discount')
    combo_ico = serializers.ImageField(source='combo.ico_thumbnail')
    combo_name = serializers.CharField(source='combo.name')
    spoker_ico = serializers.ImageField(source='trade.spokesman.ico_thumbnail')
    spoker_name = serializers.CharField(source='trade.spokesman.nick_name')
    buyer_ico = serializers.ImageField(source='trade.buyer.ico_thumbnail')
    buyer_name = serializers.CharField(source='trade.buyer.nick_name')

    def to_representation(self, instance):
        representation = super(TradeTicketSerializer, self).to_representation(instance)
        representation['trade_time'] = timetuple(instance.trade.trade_time)
        # BUG FIX: same precedence bug as TradeDiscountSerializer — the
        # chained conditional expression dropped owner_earning whenever
        # pay_platform_expend was truthy.  Sum both, treating None as 0.
        representation['pay_platform_expend'] = decimal2string(
            (instance.pay_platform_expend or 0) + (instance.owner_earning or 0))
        representation['pay_type'] = ','.join(instance.trade.pay_type(True))
        return representation

    class Meta:
        model = TradeTicketProfile
        # FIX: removed a duplicate 'trade_price' entry from the tuple.
        fields = ('ticket_number', 'trade_price', 'brokerage', 'discount',
                  'combo_ico', 'combo_name', 'spoker_ico', 'spoker_name', 'buyer_ico', 'buyer_name', 'total_fee')
class TradeMemberSerializer(MyModelSerializer):
    """Read-only rows for member-recharge trades (seller side)."""
    trade_number = serializers.CharField(source='trade.trade_number')
    trade_price = serializers.CharField(source='trade.trade_price')
    def to_representation(self, instance):
        representation = super(TradeMemberSerializer, self).to_representation(instance)
        try:
            member = ShopMember.objects.get(shop=instance.trade.shop, user=instance.trade.buyer)
        except ShopMember.DoesNotExist:
            # Member was deleted — fall back to the most recent snapshot
            # kept for historical trades.
            member = ShopMemberDelSnap.objects.filter(shop=instance.trade.shop, user=instance.trade.buyer).order_by('-id')[0]
        representation['member_id'] = member.id
        representation['name'] = member.name
        representation['trade_time'] = timetuple(instance.trade.trade_time)
        representation['pay_type'] = ','.join(instance.trade.pay_type(True))
        # recharge details: bonus amount granted and balance afterwards
        representation['gift'] = decimal2string(instance.recharge.gift)
        representation['after'] = decimal2string(instance.recharge.after)
        return representation
    class Meta:
        model = TradeMemberProfile
        fields = ('trade_price', 'trade_number')
class TradeExperienceSerializer(MyModelSerializer):
    """Read-only rows for experience-ticket redemptions.

    The "spoker" here is always the shop that distributed the flyer.
    """
    ticket_number = serializers.CharField(source='ticket.ticket_number')
    ticket_type = serializers.CharField(source='ticket.flyer.get_type_display')
    buyer_ico = serializers.ImageField(source='trade.buyer.ico_thumbnail')
    buyer_name = serializers.CharField(source='trade.buyer.nick_name')
    spoker_ico = serializers.ImageField(source='ticket.shop.ico_thumbnail')
    spoker_name = serializers.CharField(source='ticket.shop.name')
    brokerage = serializers.SerializerMethodField()
    trade_time = serializers.SerializerMethodField()
    def get_brokerage(self, obj):
        # The commission is the flyer's configured bonus.
        return decimal2string(obj.ticket.flyer.bonus)
    def get_trade_time(self, obj):
        return timetuple(obj.trade.trade_time)
    class Meta:
        model = TradeExperienceProfile
        fields = ('ticket_number', 'ticket_type', 'buyer_ico', 'buyer_name', 'spoker_ico', 'spoker_name',
                  'trade_time', 'brokerage')
class TradeShopSerializer(MyModelSerializer):
    """Read-only rows for direct shop payments (no profile wrapper)."""
    def to_representation(self, instance):
        representation = super(TradeShopSerializer, self).to_representation(instance)
        representation['trade_time'] = timetuple(instance.trade_time)
        # Single pay record here, unlike the multi-channel trade serializers.
        representation['pay_type'] = instance.pay.get_pay_type_display()
        representation['trade_price'] = decimal2string(instance.trade_price)
        return representation
    class Meta:
        model = TradeShop
        fields = ('trade_number', )
class FriendSerializer(serializers.Serializer):
    """Lightweight friend entry (id, display name, avatar URL, workplace)."""
    id = serializers.IntegerField()
    name = serializers.CharField()
    ico = serializers.CharField()
    work = serializers.CharField(required=False)
class BaseBillAppSerializer(serializers.Serializer):
    """Sales summary figures for the seller app dashboard."""
    sale_month = serializers.DecimalField(max_digits=10, decimal_places=2)
    sale_day = serializers.DecimalField(max_digits=10, decimal_places=2)
    sale_day_wx = serializers.DecimalField(max_digits=10, decimal_places=2)
    sale_day_ali = serializers.DecimalField(max_digits=10, decimal_places=2)
    sale_yesterday = serializers.DecimalField(max_digits=10, decimal_places=2)
    def my_init(self, bill):
        # ``bill`` is a 5-tuple in exactly this order:
        # (month total, today total, today WeChat, today Alipay, yesterday).
        self.sale_month, self.sale_day, self.sale_day_wx, self.sale_day_ali, self.sale_yesterday = bill
class ShopComboSerializer(serializers.ModelSerializer):
    """Combo (package deal) serializer with writable nested goods.

    ``create`` bulk-inserts the nested goods; ``update`` reconciles them —
    items with a known id are updated, items without one are created, and
    existing items missing from the payload are deleted.
    """
    ico = CompressBase64ImageField()
    goods = ShopComboGoodsSerializer(many=True, required=False)
    class Meta:
        model = ShopCombo
        fields = ('id', 'name', 'ico', 'original_price', 'activity_price',
                  'valid_period_end', 'use_time', 'precautions', 'tips', 'goods', 'festival')
        read_only_fields = ('id',)
    def create(self, validated_data):
        goods = None
        if 'goods' in validated_data:
            goods = validated_data.pop('goods')
        combo = serializers.ModelSerializer.create(self, validated_data)
        if goods:
            ShopComboGoods.objects.bulk_create([ShopComboGoods(combo=combo, **item) for item in goods])
        return combo
    def update(self, instance, validated_data):
        goods = None
        if 'goods' in validated_data:
            goods = validated_data.pop('goods')
        combo = serializers.ModelSerializer.update(self, instance=instance, validated_data=validated_data)
        if goods:
            # NOTE(review): an empty goods list skips this whole branch, so
            # passing [] does NOT delete existing goods — confirm intended.
            tmp1 = set()
            tmp_update = set()
            tmp_delete = set()
            # ids the client claims to be updating
            for item in goods:
                if 'id' in item.keys():
                    tmp1.add(item['id'])
            # of those, keep only ids that really belong to this combo
            tmp = ShopComboGoods.objects.filter(pk__in=tmp1, combo=combo)
            for item in tmp:
                tmp_update.add(item.id)
            # existing goods not mentioned in the payload are to be deleted
            tmp = ShopComboGoods.objects.filter(combo=combo)
            for item in tmp:
                if item.id not in tmp_update:
                    tmp_delete.add(item.id)
            for item in goods:
                if 'id' in item.keys() and item['id'] in tmp_update:
                    # update an existing goods row in place
                    ShopComboGoods.objects.filter(pk=item['id'], combo=combo).update(**item)
                else:
                    # create a new goods row
                    ShopComboGoods.objects.create(combo=combo, **item)
            # delete rows dropped by the client
            ShopComboGoods.objects.filter(pk__in=tmp_delete).delete()
        return combo
class ConfirmTicketSerializer(serializers.Serializer):
    """Input: a batch of ticket numbers to confirm/redeem."""
    tickets = serializers.ListField(child=serializers.CharField())
class BindBankcardSerializer(serializers.Serializer):
    """Validate a bank-card binding request against the latest SMS code.

    Checks in order: a code was sent to *phone*, it matches
    *verification_code*, and it has not expired.  On success the code is
    marked obsolete so it cannot be replayed.
    """
    card_name = serializers.CharField()
    master_name = serializers.CharField()
    phone = serializers.CharField()
    verification_code = serializers.CharField()

    def validate(self, attrs):
        try:
            # Latest code sent to this phone; indexing an empty queryset
            # raises IndexError.
            obj = SmsVerCode.objects.filter(phone=attrs['phone']).order_by('-id')[0]
        except IndexError:
            # BUG FIX: was a bare ``except:`` which also masked unrelated
            # failures (DB errors, KeyboardInterrupt) as "code not found".
            raise ValidationDict211Error('验证码未找到.')
        code = obj.code
        expire_time = obj.expire_time
        if code != attrs['verification_code']:
            raise ValidationDict211Error('验证码不匹配.')
        elif expire_time < timezone.now():
            raise ValidationDict211Error('验证码已过期.')
        # Burn the code so it cannot be reused.
        obj.obsolete = True
        obj.save(update_fields=['obsolete'])
        return attrs
class SetMinCashSerializer(serializers.Serializer):
    """Input: minimum withdrawal amount (whole units)."""
    min_cash = serializers.IntegerField()
class CardDiscountAppSerializer(serializers.ModelSerializer):
    """Member-card discount rule as shown in the seller app."""
    class Meta:
        model = CardDiscount
        fields = ('type', 'discount', 'full_price', 'reduce_price')
class ShopMemberRechargeSerializer(serializers.ModelSerializer):
    """Recharge-with-gift package tied to a member card."""
    member_card_name = serializers.SerializerMethodField()
    def get_member_card_name(self, obj):
        return obj.member_card.name
    class Meta:
        model = ShopMemberRecharge
        fields = ('id', 'member_card', 'member_card_name', 'recharge', 'gift')
        read_only_fields = ('id', )
class ShopMemberServiceSerializer(serializers.ModelSerializer):
    """A named service a shop offers (used by time/count packages)."""
    class Meta:
        model = ShopMemberService
        fields = ('id', 'name')
        read_only_fields = ('id',)
class ShopMemberRechargeTimeSerializer(MyModelSerializer):
    """Time-based package: pay ``recharge`` for ``month`` months of a service."""
    member_card_name = serializers.SerializerMethodField()
    service_name = serializers.SerializerMethodField()
    def get_member_card_name(self, obj):
        return obj.member_card.name
    def get_service_name(self, obj):
        return obj.service.name
    class Meta:
        model = ShopMemberRechargeTime
        fields = ('id', 'member_card', 'member_card_name', 'recharge', 'service', 'service_name', 'month')
        read_only_fields = ('id', 'member_card_name', 'service_name')
class ShopMemberRechargeCountSerializer(serializers.ModelSerializer):
    """Count-based package: pay ``recharge`` for ``count`` uses of a service."""
    member_card_name = serializers.SerializerMethodField()
    service_name = serializers.SerializerMethodField()
    def get_member_card_name(self, obj):
        return obj.member_card.name
    def get_service_name(self, obj):
        return obj.service.name
    class Meta:
        model = ShopMemberRechargeCount
        fields = ('id', 'member_card', 'member_card_name', 'recharge', 'service', 'service_name', 'count')
        read_only_fields = ('id', 'member_card_name', 'service_name')
class ShopMemberRechargeAllSerializer(serializers.Serializer):
    """Unified row merging the three package kinds (gift/time/count).

    Populated manually via :meth:`set`; ``type`` tells the client which
    concrete package kind the row describes.
    """
    id = serializers.IntegerField()
    member_card = serializers.IntegerField()
    member_card_name = serializers.CharField()
    recharge = serializers.CharField()
    service_name = serializers.CharField()
    describe = serializers.CharField()
    # Allowed ``type`` values (labels: 充送=gift, 时间=time, 次数=count).
    # NOTE(review): this tuple is informational only — it is not passed to
    # the CharField as choices, so it is not enforced.
    TYPE = (
        ('gift', '冲送'),
        ('time', '时间'),
        ('count', '次数')
    )
    type = serializers.CharField(max_length=8)
    def set(self, id, member_card, member_card_name, recharge, service_name, describe, type):
        # Assign every field onto the instance so the serializer can be
        # rendered without going through validation.
        self.id = id
        self.member_card = member_card
        self.member_card_name = member_card_name
        self.recharge = recharge
        self.service_name = service_name
        self.describe = describe
        self.type = type
class ShopMemberSerializer(serializers.Serializer):
    """Create/represent a shop member.

    ``create`` expects two extra keys injected by the view into
    ``validated_data``: ``request`` and ``shop_id``.  It will lazily
    register a platform user for an unknown phone number, resolve the
    member card either directly or via a recharge package, and then wire
    the new member into the shop's spokesman machinery.
    """
    id = serializers.IntegerField(required=False)
    name = serializers.CharField()
    phone = serializers.CharField()
    member_card = serializers.IntegerField(write_only=True, required=False)
    card = serializers.CharField(read_only=True)
    ico = serializers.ImageField(read_only=True)
    loose_change = serializers.DecimalField(max_digits=10, decimal_places=2, required=False)
    recharge_id = serializers.IntegerField(write_only=True, required=False)
    remark = serializers.CharField(required=False)
    def validate(self, attrs):
        # Mainland-China mobile number: 11 digits starting with 1.
        p2 = re.compile('^1\d{10}$')
        if not p2.match(attrs['phone']):
            raise serializers.ValidationError("phone number error")
        return attrs
    def create(self, validated_data):
        request = validated_data.pop('request')
        phone = validated_data['phone']
        # ``id`` is only meaningful for batch imports; drop it before create.
        if 'id' in validated_data.keys():
            validated_data.pop('id')
        try:
            user = MyUser.objects.get(phone_number=phone)
        except MyUser.DoesNotExist:
            # Auto-register: random nick/avatar, no usable password yet.
            temp = RandomNickImage.objects.all().order_by('?')[0]
            user = MyUser(username=phone, nick_name=temp.nick, ico=temp.image)
            user.set_unusable_password()
            user.save()
            register_profile(request, user)
        validated_data['user'] = user
        if 'member_card' in validated_data.keys():
            # Explicit card id given: resolve it to the model instance.
            validated_data['member_card'] = ShopMemberCard.objects.get(pk=validated_data['member_card'])
        elif 'member_card' not in validated_data.keys() and 'recharge_id' in validated_data.keys():
            # Card derived from a recharge package; default the opening
            # balance to recharge + gift unless explicitly provided.
            shop_member_recharge = ShopMemberRecharge.objects.get(pk=validated_data['recharge_id'])
            validated_data['member_card'] = shop_member_recharge.member_card
            validated_data.pop('recharge_id')
            if 'loose_change' not in validated_data.keys():
                validated_data['loose_change'] = shop_member_recharge.recharge + shop_member_recharge.gift
        else:
            raise ValidationDict211Error('member_card error')
        member = ShopMember.objects.create(**validated_data)
        shop_id = validated_data['shop_id']
        # existing spoker are coverded into members
        if ShopSpoke.objects.filter(shop_id=shop_id, spokesman=user).exists():
            ShopSpoke.objects.filter(shop_id=shop_id, spokesman=user).update(spokesman=None, member=member, type='member')
        else:
            try:
                # todo
                # Pending spokesman application: archive it as rejected and
                # remove it, since the user becomes a member instead.
                obj = ShopSpokeRequest.objects.get(resume__user=user, shop_id=shop_id)
                ShopSpokeRequestHistory.objects.create(shop=obj.shop, spokesman=obj.resume.user, request_time=obj.request_time, result=False)
                obj.delete()
            except:
                # NOTE(review): bare except deliberately makes this cleanup
                # best-effort, but it also hides real DB errors — consider
                # narrowing to ShopSpokeRequest.DoesNotExist.
                pass
            ShopSpoke.objects.create(shop_id=shop_id, member=member, type='member')
        group = FriendGroup.objects.get(user=user, type=3)
        # Card discount (percent) defaults to 100 (no discount); the group
        # discount is mapped onto a 50-100 scale.
        discount = member.member_card.discount.discount if hasattr(member.member_card, 'discount') else 100
        ShopSpokeGroup.objects.create(shop_id=shop_id, group=group, discount=(0.5 * discount + 50))
        # Push-notify the seller about the new member.
        apns_push.handle_seller_member_create(user, member.shop, member.user.nick_name,
                                              member.member_card.name)
        return member
class ShopMemberBatchSerializer(serializers.Serializer):
    """Batch member import.

    ``create`` expects ``request`` and ``shop_id`` injected by the view;
    it delegates each row to :class:`ShopMemberSerializer` and returns the
    rows that were skipped because the phone already belongs to a member.
    """
    members = ShopMemberSerializer(many=True)
    def create(self, validated_data):
        request = validated_data.pop('request')
        shop_id = validated_data.pop('shop_id')
        failed = []
        for item in validated_data['members']:
            serializer = ShopMemberSerializer(item)
            # Re-inject the per-request context each row's create() needs.
            item['request'] = request
            item['shop_id'] = shop_id
            phone = item['phone']
            if not ShopMember.objects.filter(shop_id=shop_id, user__phone_number=phone).exists():
                serializer.create(item)
            else:
                # Already a member of this shop — report back to the client.
                if 'id' in item.keys():
                    temp = {'id':item['id'], 'reason':'已是会员'}
                    failed.append(temp)
        return failed
# NOTE(review): "Rechage" is a historical typo for "Recharge" in these
# class names; they are public API of this module and are kept as-is.
class TradeMemberRechageSerializer(serializers.Serializer):
    """Input: id of a predefined recharge package."""
    recharge_id = serializers.IntegerField()
class TradeMemberRechageInputSerializer(serializers.Serializer):
    """Input: free-form recharge amount with optional gift."""
    amount = serializers.DecimalField(max_digits=10, decimal_places=2)
    gift = serializers.DecimalField(max_digits=10, decimal_places=2, default=0)
class TradeMemberRechageTimeInputSerializer(serializers.Serializer):
    """Input: buy ``month`` months of a service for ``amount``."""
    amount = serializers.DecimalField(max_digits=10, decimal_places=2)
    service_id = serializers.IntegerField()
    month = serializers.IntegerField()
class TradeMemberRechageCountInputSerializer(serializers.Serializer):
    """Input: buy ``count`` uses of a service for ``amount``."""
    amount = serializers.DecimalField(max_digits=10, decimal_places=2)
    service_id = serializers.IntegerField()
    count = serializers.IntegerField()
class TradeMemberConsumeSerializer(serializers.Serializer):
    """Input: deduct ``amount`` from a member's balance."""
    amount = serializers.DecimalField(max_digits=10, decimal_places=2)
class TradeMemberConsumeTimeSerializer(serializers.Serializer):
    """Input: consume ``month`` months from a time-based package."""
    service_id = serializers.IntegerField()
    month = serializers.IntegerField()
class TradeMemberConsumeCountSerializer(serializers.Serializer):
    """Input: consume ``count`` uses from a count-based package."""
    service_id = serializers.IntegerField()
    count = serializers.IntegerField()
class MemberRechargeHistoryAppSerializer(serializers.ModelSerializer):
    """Recharge-history row for the seller app.

    NOTE(review): the output key ``discribe`` is a historical typo for
    "describe"; it is part of the client API and must not be renamed.
    """
    trade_number = serializers.CharField(source='trade.trade_number')
    trade_time = serializers.SerializerMethodField()
    name = serializers.SerializerMethodField()
    discribe = serializers.SerializerMethodField()
    after = serializers.SerializerMethodField()
    def get_trade_time(self, obj):
        return timetuple(obj.trade.trade_time)
    def get_name(self, obj):
        try:
            return ShopMember.objects.get(shop=obj.trade.shop, user=obj.trade.buyer).name
        except ShopMember.DoesNotExist:
            # Member was deleted — use the most recent snapshot instead.
            return ShopMemberDelSnap.objects.filter(shop=obj.trade.shop, user=obj.trade.buyer).order_by('-id')[0].name
    def get_discribe(self, obj):
        # Todo have three choice
        # Chinese summary: "recharge X, gift Y yuan" or just "recharge X yuan".
        # Returns None implicitly when there is no recharge record.
        if obj.recharge:
            return '充{0}赠送{1}元'.format(obj.recharge.recharge, obj.recharge.gift) if obj.recharge.gift > 0 \
                else '充{0}元'.format(obj.recharge.recharge)
    def get_after(self, obj):
        # Todo have three choice
        if obj.recharge:
            return decimal2string(obj.recharge.after)
    class Meta:
        model = TradeMemberProfile
        fields = ('trade_number', 'trade_time', 'name', 'discribe', 'after')
class MemberRechargeHistorySerializer(serializers.ModelSerializer):
    """Recharge-history row for the management/web view.

    Exposes the parent trade's id, gross/net amounts, the member's name
    (falling back to the deletion snapshot), the pay card name, and the
    gift/after figures for package recharges.
    """
    id = serializers.SerializerMethodField()
    trade_time = serializers.SerializerMethodField()
    recharge = serializers.SerializerMethodField()
    amount = serializers.SerializerMethodField()
    member_name = serializers.SerializerMethodField()
    card_name = serializers.SerializerMethodField()
    gift = serializers.SerializerMethodField()
    after = serializers.SerializerMethodField()

    def get_id(self, obj):
        # Expose the parent trade's pk, not the profile row's.
        return obj.trade.id

    def get_trade_time(self, obj):
        return timetuple(obj.trade.trade_time)

    def get_recharge(self, obj):
        return decimal2string(obj.trade_price)

    def get_amount(self, obj):
        # Gross amount = price actually paid plus the discount reduction.
        return decimal2string(obj.trade_price + obj.discount_reduce)

    def get_member_name(self, obj):
        try:
            return ShopMember.objects.get(shop=obj.trade.shop, user=obj.trade.buyer).name
        except ShopMember.DoesNotExist:
            # Member was deleted — use the most recent snapshot instead.
            return ShopMemberDelSnap.objects.filter(shop=obj.trade.shop, user=obj.trade.buyer).order_by('-id')[0].name

    def get_card_name(self, obj):
        # Todo have three choice
        try:
            return obj.trade.tradepay_set.all()[0].card_name
        except Exception:
            # BUG FIX: was a bare ``except:`` (which also trapped
            # KeyboardInterrupt/SystemExit); keep the best-effort None
            # fallback for ordinary failures such as an empty pay set.
            return None

    def get_gift(self, obj):
        # Todo have three choice
        # Chinese label "gift ¥X yuan"; None when no gift / no recharge.
        if obj.recharge:
            return '赠送¥{0}元'.format(obj.recharge.gift) if obj.recharge.gift > 0 else None

    def get_after(self, obj):
        # Todo have three choice
        # NOTE(review): returns the raw Decimal, unlike the other money
        # getters which go through decimal2string — confirm client expects it.
        if obj.recharge:
            return obj.recharge.after

    class Meta:
        model = TradeMemberProfile
        fields = ('id', 'recharge', 'trade_time', 'amount', 'member_name', 'card_name', 'gift', 'after')
class MemberConsumeHistoryAppSerializer(serializers.ModelSerializer):
    """Member consumption rows (app view), backed by TradePay."""

    trade_number = serializers.CharField(source='trade.trade_number')
    trade_time = serializers.SerializerMethodField()
    name = serializers.SerializerMethodField()
    amount = serializers.SerializerMethodField()

    def get_trade_time(self, obj):
        return timetuple(obj.trade.trade_time)

    def get_amount(self, obj):
        return decimal2string(obj.trade_price)

    def get_name(self, obj):
        # Live membership name, or the latest deletion snapshot.
        try:
            return ShopMember.objects.get(shop=obj.trade.shop, user=obj.trade.buyer).name
        except ShopMember.DoesNotExist:
            return ShopMemberDelSnap.objects.filter(shop=obj.trade.shop, user=obj.trade.buyer).order_by('-id')[0].name

    class Meta:
        model = TradePay
        fields = ('trade_number', 'trade_time', 'name', 'amount', 'remain')
class MemberConsumeHistorySerializer(serializers.ModelSerializer):
    """Member consumption rows (seller view), backed by TradePay."""

    trade_time = serializers.SerializerMethodField()
    amount = serializers.SerializerMethodField()
    member_name = serializers.SerializerMethodField()
    is_myself = serializers.SerializerMethodField()

    def get_trade_time(self, obj):
        return timetuple(obj.trade.trade_time)

    def get_amount(self, obj):
        return decimal2string(obj.trade_price)

    def get_member_name(self, obj):
        try:
            return ShopMember.objects.get(shop=obj.trade.shop, user=obj.trade.buyer).name
        except ShopMember.DoesNotExist:
            return ShopMemberDelSnap.objects.filter(shop=obj.trade.shop, user=obj.trade.buyer).order_by('-id')[0].name

    def get_is_myself(self, obj):
        # Hard-coded True — this endpoint only lists the requester's own rows.
        return True

    class Meta:
        model = TradePay
        fields = ('id', 'card_name', 'trade_time', 'amount', 'member_name', 'is_myself', 'remain')
class TimeFilterSerializer(serializers.Serializer):
    """Optional begin/end date range used to filter list endpoints."""
    begin_time = serializers.DateField(required=False)
    end_time = serializers.DateField(required=False)
class CashRecordListSerializer(AbstractCashRecordListSerializer):
    """Withdrawal-record list view; behaviour inherited from the abstract base."""
    class Meta:
        model = CashRecord
        fields = ('id', 'request_time', 'cash', 'status')
class CashRecordSerializer(AbstractCashRecordSerializer):
    """Withdrawal-record detail view; behaviour inherited from the abstract base."""
    class Meta:
        model = CashRecord
        fields = ('id', 'cash', 'charge', 'status', 'trace',
                  'request_bank_name', 'request_acc_no')
class BounsPoolSerializer(serializers.Serializer):
    """Bonus-pool payment input.  NOTE(review): class name misspells "Bonus";
    kept for backward compatibility with existing imports."""
    total_fee = serializers.DecimalField(max_digits=10, decimal_places=2)
    pay_type = serializers.IntegerField()
class ShopFlyerSerializer(MyModelSerializer):
    """Flyer card for the shop owner.  Flyer `type`: 1 = discount,
    2 = full-price reduction, 3 = experience combo."""

    img = CompressBase64ImageField()
    bonus_type = serializers.SerializerMethodField()
    describe = serializers.SerializerMethodField()
    combo_name = serializers.SerializerMethodField()
    describe2 = serializers.SerializerMethodField()

    class Meta:
        model = ShopFlyer
        fields = ('id', 'type', 'img', 'bonus', 'valid_period_end', 'describe', 'bonus_type', 'combo_name', 'describe2')
        custom_fields = ('bonus_type', 'combo_name', 'describe2')

    def get_bonus_type(self, obj):
        # Only discount flyers (type 1) carry a bonus_type.
        if 1 == obj.type:
            return obj.discount.bonus_type
        return None

    def get_describe(self, obj):
        # Main caption per flyer type.
        # NOTE(review): `discount / 10` is integer division under Python 2
        # when `discount` is an int (85 -> "8折" not "8.5折") — confirm the
        # stored values are multiples of 10 or a Decimal.
        if 1 == obj.type:
            return '店内消费{0}折'.format(obj.discount.discount / 10)
        elif 2 == obj.type:
            return '店内消费满{0}减{1}'.format(obj.reduce.full_price, obj.reduce.reduce_price)
        elif 3 == obj.type:
            return '原价: {0}'.format(obj.experience.original_price)
        return None

    def get_describe2(self, obj):
        # Secondary caption: minimum spend, discount flyers only.
        if 1 == obj.type:
            return '最低消费: {0}元'.format(obj.discount.full_price)
        return None

    def get_combo_name(self, obj):
        return obj.experience.name if 3 == obj.type else None
class ShopFlyerNearbySerializer(ShopFlyerSerializer):
    """Flyer card for the nearby-shops listing; expects the queryset to
    annotate each object with `distance`, `league` and (optionally)
    `league_id`."""

    shop_name = serializers.CharField(source='shop.name')
    phone = serializers.CharField(source='shop.phone')
    # NOTE(review): bonus_type/describe2 re-declare methods identical to the
    # base class — redundant but harmless.
    bonus_type = serializers.SerializerMethodField()
    distance = serializers.SerializerMethodField()
    league = serializers.SerializerMethodField()
    league_id =serializers.SerializerMethodField()
    status = serializers.SerializerMethodField()
    describe2 = serializers.SerializerMethodField()

    def get_status(self, obj):
        # 'limit' wins; otherwise 'full' once 20 shops have taken the flyer.
        if obj.status == 'limit':
            status = 'limit'
        elif Flyer2Shop.objects.filter(flyer=obj).count() >= 20:
            status = 'full'
        else:
            status = 'normal'
        return status

    def get_describe2(self, obj):
        if 1 == obj.type:
            return '最低消费: {0}元'.format(obj.discount.full_price)
        return None

    class Meta:
        model = ShopFlyer
        fields = ('id', 'shop_id', 'type', 'img', 'bonus', 'valid_period_end', 'describe', 'combo_name',
                  'shop_name', 'phone', 'distance', 'league', 'league_id', 'status', 'describe2', 'bonus_type')
        custom_fields = ('combo_name', 'league_id', 'describe2', 'bonus_type')

    def get_bonus_type(self, obj):
        if 1 == obj.type:
            return obj.discount.bonus_type
        return None

    def get_distance(self, obj):
        # Truncate annotated distance (metres) to an int.
        return int(obj.distance)

    def get_league(self, obj):
        return obj.league

    def get_league_id(self, obj):
        # `league_id` is an optional queryset annotation.
        return obj.league_id if hasattr(obj, 'league_id') else None
class ShopFlyerInnerSerializer(MyModelSerializer):
    """Nested flyer payload used inside the flyer-profile serializers."""
    img = CompressBase64ImageField()
    type = serializers.IntegerField(required=False)
    class Meta:
        model = ShopFlyer
        fields = ('id', 'img', 'bonus', 'valid_period_end',
                  'day_begin', 'day_end', 'festival', 'precautions', 'tips', 'type')
class ShopFlyerProfileSerializer(serializers.Serializer):
    """Loose input serializer covering all three flyer-profile variants;
    every field is optional, concrete validation happens downstream."""

    flyer = ShopFlyerInnerSerializer(required=False)
    bonus_type = serializers.IntegerField(required=False)
    discount = serializers.IntegerField(required=False)
    full_price = serializers.IntegerField(required=False)
    reduce_price = serializers.IntegerField(required=False)
    name = serializers.CharField(required=False)
    original_price = serializers.DecimalField(max_digits=10, decimal_places=2, required=False)
    goods = ShopComboGoodsSerializer(many=True, required=False)

    def validate(self, attrs):
        # bonus_type, when supplied, must be 1 or 2.
        if 'bonus_type' in attrs.keys() and attrs['bonus_type'] not in (1, 2):
            raise ValidationDict211Error('bonus_type error')
        return attrs
class AbstractShopFlyerProfileSerializer(serializers.ModelSerializer):
    """Shared base for the three flyer-profile serializers: nested flyer,
    shop contact fields and the type-dependent caption."""

    flyer = ShopFlyerInnerSerializer()
    shop_name = serializers.CharField(source='flyer.shop.name', read_only=True)
    shop_address = serializers.CharField(source='flyer.shop.address', read_only=True)
    shop_phone = serializers.CharField(source='flyer.shop.phone', read_only=True)
    describe = serializers.SerializerMethodField()

    class Meta:
        fields = ('flyer', 'shop_name', 'shop_address', 'shop_phone', 'describe')

    def get_describe(self, obj):
        # Here the profile row itself holds the numbers (unlike
        # ShopFlyerSerializer, which reads them via obj.discount/.reduce/...).
        if 1 == obj.flyer.type:
            return '店内消费{0}折'.format(obj.discount / 10)
        elif 2 == obj.flyer.type:
            return '店内消费满{0}减{1}'.format(obj.full_price, obj.reduce_price)
        elif 3 == obj.flyer.type:
            return '原价: {0}'.format(obj.original_price)
        return None
class ShopFlyerDiscountSerializer(AbstractShopFlyerProfileSerializer):
    """Discount-flyer (type 1) profile: create + validation."""

    class Meta:
        model = ShopFlyerDiscountProfile
        # Tuple concatenation instead of `tuple(set(...) | set(...))`: the two
        # field groups do not overlap, and a set made the serialized field
        # order nondeterministic under hash randomization.
        fields = ('bonus_type', 'discount', 'full_price') + AbstractShopFlyerProfileSerializer.Meta.fields

    def validate(self, attrs):
        # bonus_type 1 or 2; for type 2 the bonus is a percentage (<= 100).
        if attrs['bonus_type'] not in (1, 2):
            raise ValidationDict211Error('bonus_type error')
        elif 2 == attrs['bonus_type']:
            if attrs['flyer']['bonus'] > 100:
                raise ValidationDict211Error('bonus error')
            attrs['flyer']['bonus'] = int(attrs['flyer']['bonus'])
        return attrs

    def create(self, validated_data):
        # `shop_id` is injected by the view; one online flyer per shop.
        shop_id = validated_data.pop('shop_id')
        flyer = validated_data.pop('flyer')
        if ShopFlyer.objects.filter(shop_id=shop_id, status='online').exists():
            raise serializers.ValidationError('has one flyer')
        flyer = ShopFlyer.objects.create(shop_id=shop_id, type=1, **flyer)
        profile = self.Meta.model.objects.create(flyer=flyer, **validated_data)
        return profile
class ShopFlyerReduceSerializer(AbstractShopFlyerProfileSerializer):
    """Full-price-reduction flyer (type 2) profile."""

    class Meta:
        model = ShopFlyerReduceProfile
        # Tuple concatenation instead of `tuple(set(...) | set(...))`: the two
        # field groups do not overlap, and a set made the serialized field
        # order nondeterministic under hash randomization.
        fields = ('full_price', 'reduce_price') + AbstractShopFlyerProfileSerializer.Meta.fields

    def create(self, validated_data):
        # `shop_id` is injected by the view; one online flyer per shop.
        shop_id = validated_data.pop('shop_id')
        flyer = validated_data.pop('flyer')
        if ShopFlyer.objects.filter(shop_id=shop_id, status='online').exists():
            raise serializers.ValidationError('has one flyer')
        flyer = ShopFlyer.objects.create(shop_id=shop_id, type=2, **flyer)
        profile = self.Meta.model.objects.create(flyer=flyer, **validated_data)
        return profile
class ShopFlyerExperienceGoodsSerializer(serializers.ModelSerializer):
    """Single goods line inside an experience-combo flyer."""
    id = serializers.IntegerField(required=False)
    class Meta:
        model = ShopFlyerExperienceGoods
        fields = ('id', 'name', 'price', 'num', 'unit')
        read_only_fields = ('id', )
class ShopFlyerExperienceSerializer(AbstractShopFlyerProfileSerializer):
    """Experience-combo flyer (type 3) profile with its goods list."""

    goods = ShopComboGoodsSerializer(many=True)

    class Meta:
        model = ShopFlyerExperienceProfile
        # Tuple concatenation instead of `tuple(set(...) | set(...))`: the two
        # field groups do not overlap, and a set made the serialized field
        # order nondeterministic under hash randomization.
        fields = ('name', 'original_price', 'goods') + AbstractShopFlyerProfileSerializer.Meta.fields

    def create(self, validated_data):
        # `shop_id` is injected by the view; one online flyer per shop.
        shop_id = validated_data.pop('shop_id')
        goods = validated_data.pop('goods')
        flyer = validated_data.pop('flyer')
        if ShopFlyer.objects.filter(shop_id=shop_id, status='online').exists():
            raise serializers.ValidationError('has one flyer')
        flyer = ShopFlyer.objects.create(shop_id=shop_id, type=3, **flyer)
        profile = self.Meta.model.objects.create(flyer=flyer, **validated_data)
        # Goods rows are attached in a single bulk insert.
        ShopFlyerExperienceGoods.objects.bulk_create(
            [ShopFlyerExperienceGoods(combo=profile, **item) for item in goods])
        return profile
class Flyer2ShopMineSerializer(serializers.ModelSerializer):
    """Flyers the current shop has taken; expects a `distance` annotation."""

    name = serializers.CharField(source='shop.name')
    ico = serializers.ImageField(source='shop.ico')
    distance = serializers.SerializerMethodField()
    phone = serializers.CharField(source='shop.phone')
    bonus_type = serializers.SerializerMethodField()
    bonus = serializers.SerializerMethodField()

    def get_distance(self, obj):
        return int(obj.distance)

    def get_bonus_type(self, obj):
        # Percentage bonus (2) only for discount flyers configured that way;
        # everything else reports a fixed bonus (1).
        flyer = obj.flyer
        return 2 if flyer.type == 1 and flyer.discount.bonus_type == 2 else 1

    def get_bonus(self, obj):
        return decimal2string(obj.flyer.bonus)

    class Meta:
        model = Flyer2Shop
        fields = ('id', 'name', 'ico', 'distance', 'phone', 'bonus_type', 'bonus', 'count', 'flyer_id')
class Flyer2ShopOhterSerializer(MyModelSerializer):
    """Flyers taken by other shops.  NOTE(review): "Ohter" is a typo for
    "Other", kept because the name is imported elsewhere.  Expects `distance`
    and `temp` (the Flyer2Shop id) annotations on each object."""

    img = CompressBase64ImageField()
    describe = serializers.SerializerMethodField()
    combo_name = serializers.SerializerMethodField()
    bonus_type = serializers.SerializerMethodField()
    shop_name = serializers.CharField(source='shop.name')
    phone = serializers.CharField(source='shop.phone')
    distance = serializers.SerializerMethodField()
    id = serializers.SerializerMethodField()
    flyer_id = serializers.IntegerField(source='id')
    status = serializers.SerializerMethodField()
    describe2 = serializers.SerializerMethodField()

    def get_id(self, obj):
        # `temp` is an annotation carrying the Flyer2Shop relation id; the
        # flyer's own pk is exposed as `flyer_id` instead.
        return obj.temp

    def get_describe(self, obj):
        if 1 == obj.type:
            return '店内消费{0}折'.format(obj.discount.discount / 10)
        elif 2 == obj.type:
            return '店内消费满{0}减{1}'.format(obj.reduce.full_price, obj.reduce.reduce_price)
        elif 3 == obj.type:
            return '原价: {0}'.format(obj.experience.original_price)
        return None

    def get_describe2(self, obj):
        if 1 == obj.type:
            return '最低消费: {0}元'.format(obj.discount.full_price)
        return None

    def get_bonus_type(self, obj):
        if 1 == obj.type:
            return obj.discount.bonus_type
        return None

    def get_combo_name(self, obj):
        return obj.experience.name if 3 == obj.type else None

    def get_distance(self, obj):
        return int(obj.distance)

    def get_status(self, obj):
        # Unlike ShopFlyerNearbySerializer, no 'full' state here.
        if obj.status == 'limit':
            status = 'limit'
        else:
            status = 'normal'
        return status

    class Meta:
        model = ShopFlyer
        fields = ('id', 'type', 'img', 'bonus', 'valid_period_end', 'describe', 'combo_name',
                  'shop_name', 'phone', 'distance', 'shop_id', 'flyer_id', 'status', 'describe2', 'bonus_type')
        custom_fields = ('combo_name', 'describe2', 'bonus_type')
class FlyerTradeOtherSerializer(serializers.Serializer):
    """Read-only flyer-trade row as seen by other users."""

    buyer_name = serializers.CharField()
    buyer_ico = serializers.CharField()
    shop_name = serializers.CharField()
    shop_ico = serializers.CharField()
    time = serializers.IntegerField()
    trade_type = serializers.CharField()
    type = serializers.IntegerField()
    bonus = serializers.CharField()
    bonus_type = serializers.SerializerMethodField()

    def get_bonus_type(self, obj):
        # NOTE(review): assumes the (non-model) obj exposes `type` and a
        # `discount` relation like ShopFlyer — confirm against the view.
        if 1 == obj.type:
            return obj.discount.bonus_type
        return None
class FlyerTradeMineSerializer(FlyerTradeOtherSerializer):
    """Own flyer-trade row: adds the trade number and price."""
    number = serializers.CharField()
    trade_price = serializers.CharField()
class Interface1RequestSerializer(serializers.Serializer):
    """Generic request payload for the "interface1" endpoint."""
    type = serializers.CharField()
class Interface1ResponseSerializer(serializers.Serializer):
    """Response for the "interface1" endpoint: app version + notification flags."""
    version = serializers.CharField()
    no_disturb = serializers.BooleanField()
    apns_voice = serializers.BooleanField()
class ShopHomeSerializer(serializers.ModelSerializer):
    """Shop header for the home screen; expects `is_seller`/`is_manager`
    annotations on the Shop instance."""

    ico = serializers.ImageField(source='ico_thumbnail')
    is_seller = serializers.SerializerMethodField()
    is_manager = serializers.SerializerMethodField()

    def get_is_seller(self, obj):
        return obj.is_seller

    def get_is_manager(self, obj):
        return obj.is_manager

    class Meta:
        model = Shop
        fields = ('id', 'name', 'ico', 'is_seller', 'is_manager')
        read_only_fields = ('ico', 'face',)
class HomeAppSerializer(serializers.Serializer):
    """Composite home-screen payload: shop header plus bill summary.
    NOTE(review): `my_init` rebinds the instance attributes that shadow the
    declared fields — callers appear to pass already-serialized data; confirm
    how the view consumes this."""

    shop = ShopHomeSerializer()
    bill = BaseBillAppSerializer()

    def my_init(self, shop, bill):
        self.shop = shop
        self.bill = bill
class ShopMemberListAppSerializer(serializers.ModelSerializer):
    """Member list row: avatar thumbnail plus the card name."""
    ico = serializers.ImageField(source='user.ico_thumbnail')
    card = serializers.CharField(source='member_card.name')
    class Meta:
        model = ShopMember
        fields = ('id', 'name', 'ico', 'card')
class ShopMemberCardListSerializer(serializers.ModelSerializer):
    """Member-card list row with the owning shop's name."""
    shop_name = serializers.CharField(source='shop.name')
    class Meta:
        model = ShopMemberCard
        fields = ('id', 'name', 'image', 'shop_name')
class ShopRequireAppSerializer(serializers.ModelSerializer):
    """Spokesperson requirements plus the shop's request/slot counters."""

    request_count = serializers.SerializerMethodField()
    max_count = serializers.IntegerField(source='shop.max_spoke')
    current_count = serializers.IntegerField(source='shop.spoke_count')

    def get_request_count(self, obj):
        # Pending spokesperson applications for this shop.
        return obj.shop.shopspokerequest_set.count()

    class Meta:
        model = ShopRequire
        fields = ('require1', 'require2', 'require3', 'request_count', 'max_count', 'current_count')
class ShopSpokerListSerializer(serializers.Serializer):
    """Spokesperson list row (read-only, fed from aggregated view data)."""
    id = serializers.IntegerField()
    ico = serializers.CharField()
    name = serializers.CharField()
    is_seller = serializers.BooleanField(required=False)
    is_manager = serializers.BooleanField()
    count = serializers.IntegerField()
    sale = serializers.CharField()
class ShopSpokerSerializer(serializers.Serializer):
    """Spokesperson detail (read-only): list fields plus phone, start time
    and accumulated brokerage."""
    id = serializers.IntegerField()
    ico = serializers.CharField()
    name = serializers.CharField()
    phone = serializers.CharField()
    begin_time = serializers.IntegerField()
    is_seller = serializers.BooleanField(required=False)
    is_manager = serializers.BooleanField(required=False)
    count = serializers.IntegerField()
    sale = serializers.CharField()
    brokerage = serializers.CharField()
class ShopSpokeRequestListSerializer(MyModelSerializer):
    """Applicant row in the spokesperson-request list."""
    ico = serializers.ImageField(source='ico_thumbnail')
    name = serializers.CharField(source='nick_name')
    phone = serializers.CharField(source='phone_number')
    class Meta:
        model = MyUser
        fields = ('id', 'ico', 'name', 'phone')
class ShopSpokesRequestAppSerializer(MyModelSerializer):
    """Applicant detail: profile, real-name/resume data and request time.
    Expects a `request_time` annotation on the MyUser instance."""

    ico = serializers.ImageField(source='ico_thumbnail')
    name = serializers.CharField(source='nick_name')
    phone = serializers.CharField(source='phone_number')
    real_name = serializers.CharField(source='nationalid.real_name', required=False)
    work = serializers.CharField(source='resume.work', required=False)
    resume = serializers.CharField(source='resume.resume')
    request_time = serializers.SerializerMethodField()

    def get_request_time(self, obj):
        return timetuple(obj.request_time)

    class Meta:
        model = MyUser
        fields = ('id', 'ico', 'name', 'phone', 'female', 'birthday', 'real_name', 'work', 'resume', 'request_time')
        custom_fields = ('female', 'birthday', 'real_name', 'work')
class ShopSpokerResumeSerializer(MyModelSerializer):
    """Spokesperson resume view — same profile fields, without request_time."""
    ico = serializers.ImageField(source='ico_thumbnail')
    name = serializers.CharField(source='nick_name')
    phone = serializers.CharField(source='phone_number')
    real_name = serializers.CharField(source='nationalid.real_name', required=False)
    work = serializers.CharField(source='resume.work', required=False)
    resume = serializers.CharField(source='resume.resume')
    class Meta:
        model = MyUser
        fields = ('id', 'ico', 'name', 'phone', 'female', 'birthday', 'real_name', 'work', 'resume')
        custom_fields = ('birthday', 'real_name', 'work')
class ShopMemberCardSerializer(MyModelSerializer):
    """Create/update a member card with its optional discount and recharge
    tiers.  `update` also re-derives each member-type spokesperson's
    `member_discount` when the card discount changes."""

    class ShopMemberRechargeInnerSerializer(MyModelSerializer):
        # One recharge tier: pay `recharge`, receive `gift` on top.
        class Meta:
            model = ShopMemberRecharge
            fields = ('recharge', 'gift')
    image = Base64ImageField()
    discount = CardDiscountAppSerializer(required=False)
    recharge = ShopMemberRechargeInnerSerializer(many=True, required=False)

    def create(self, validated_data):
        # Pop nested payloads before the default ModelSerializer create.
        if 'discount' in validated_data:
            discount = validated_data.pop('discount')
        else:
            discount = {}
        recharge = None
        if 'recharge' in validated_data:
            recharge = validated_data.pop('recharge')
        shop_member_card = serializers.ModelSerializer.create(self, validated_data)
        # A CardDiscount row is always created (possibly with defaults).
        CardDiscount.objects.create(member_card=shop_member_card, **discount)
        if recharge:
            ShopMemberRecharge.objects.bulk_create([ShopMemberRecharge(shop=shop_member_card.shop, member_card=shop_member_card, **item) for item in recharge])
        return shop_member_card

    def update(self, instance, validated_data):
        discount = None
        if 'discount' in validated_data.keys():
            discount = validated_data.pop('discount')
        recharge = None
        if 'recharge' in validated_data.keys():
            recharge = validated_data.pop('recharge')
        shop_member_card = serializers.ModelSerializer.update(self, instance=instance, validated_data=validated_data)
        shop_member_card.save()
        if discount:
            # Remember the previous discount (100 == no discount) so member
            # spokesperson rates can be rescaled below.
            old_discount = shop_member_card.discount.discount if hasattr(shop_member_card, 'discount') else 100
            if hasattr(shop_member_card, 'discount'):
                CardDiscount.objects.filter(member_card=shop_member_card).update(**discount)
                member_discount = CardDiscount.objects.get(member_card=shop_member_card)
            else:
                discount['member_card'] = shop_member_card
                member_discount = CardDiscount.objects.create(**discount)
            queryset = ShopSpoke.objects.filter(shop=shop_member_card.shop, type='member')
            queryset = ShopSpokeGroup.objects.filter(shop=shop_member_card.shop, group__user__in=[item.member.user_id for item in queryset])
            for query in queryset:
                if old_discount < 100:
                    # Rescale: recover the spokesperson's relative position
                    # between old_discount and 100, then re-apply it against
                    # the new discount.
                    tmp = (query.member_discount - old_discount) / decimal.Decimal(100.0 - old_discount)
                    query.member_discount = 100 * tmp - tmp * member_discount.discount + member_discount.discount
                    query.save(update_fields=['member_discount'])
        if recharge:
            # Recharge tiers are replaced wholesale.
            ShopMemberRecharge.objects.filter(shop=shop_member_card.shop, member_card=shop_member_card).delete()
            ShopMemberRecharge.objects.bulk_create([ShopMemberRecharge(shop=shop_member_card.shop, member_card=shop_member_card, **item) for item in recharge])
        return shop_member_card

    class Meta:
        model = ShopMemberCard
        fields = ('id', 'name', 'image', 'discount', 'level', 'recharge')
        read_only_fields = ('id', )
        custom_fields = ('discount', 'recharge')
class ShopComboListSerializer(serializers.ModelSerializer):
    """Combo list row with compressed icon."""
    ico = CompressBase64ImageField()
    class Meta:
        model = ShopCombo
        fields = ('id', 'name', 'ico', 'original_price', 'activity_price', 'valid_period_end', 'status')
class BonusSerializer(serializers.Serializer):
    """Pairs a wallet bonus with the shop discount that produced it."""
    bonus = WalletBonusSerializer()
    discount = ShopDiscountSerializer()
class TradeSerializer(serializers.Serializer):
    """Read-only trade row (pre-formatted strings from the view layer)."""
    buyer_ico = serializers.CharField()
    type = serializers.CharField()
    time = serializers.IntegerField()
    total_fee = serializers.CharField()
    number = serializers.CharField()
    remark = serializers.CharField(required=False)
    pay_type = serializers.CharField()
class TradeBonusSerializer(serializers.Serializer):
    """Read-only brokerage/bonus row for a trade."""
    time = serializers.IntegerField()
    type = serializers.CharField()
    buyer_ico = serializers.CharField()
    brokerage = serializers.CharField()
    number = serializers.CharField()
class WithdrawRecordSerializer(serializers.Serializer):
    """Read-only withdrawal row: epoch time plus formatted amount."""
    time = serializers.IntegerField()
    amount = serializers.CharField()
class ShopManagerCreateSerializer(serializers.Serializer):
    """Create a shop manager, auto-registering the user when the phone
    number is new (password then required, random default avatar)."""

    phone = serializers.CharField()
    name = serializers.CharField()
    password = serializers.CharField(required=False)

    def create(self, validated_data):
        phone = validated_data['phone']
        name = validated_data['name']
        try:
            user = MyUser.objects.get(phone_number=phone)
        except MyUser.DoesNotExist:
            # Unknown phone: register the user on the fly.
            try:
                password = validated_data['password']
                temp = RandomNickImage.objects.all().order_by('?')[0]
                user = MyUser(username=phone, nick_name=name, phone_number=phone, ico=temp.image)
                user.set_password(password)
                user.save()
            except Exception:
                # Missing password, empty avatar pool or a DB failure — report
                # a uniform validation error.  (Was a bare `except:`, which
                # also swallowed SystemExit/KeyboardInterrupt.)
                raise ValidationDict211Error('error')
        try:
            ship = ShopManagerShip.objects.create(shop_id=validated_data['shop_id'], user=user, name=name)
        except Exception:
            # Unique-constraint violation: this user already manages the shop.
            raise ValidationDict211Error('已存在管理员')
        return ship
class ShopManagerSerializer(serializers.ModelSerializer):
    """Manager row; password is accepted on input but never echoed back."""
    id = serializers.IntegerField(source='user.id', read_only=True)
    ico = serializers.ImageField(source='user.ico_thumbnail', read_only=True)
    phone = serializers.CharField(source='user.phone_number')
    password = serializers.CharField(required=False, write_only=True)
    class Meta:
        model = ShopManagerShip
        fields = ('id', 'ico', 'phone', 'name', 'password')
class SettingSerializer(serializers.Serializer):
    """Notification settings toggles; both optional for partial updates."""
    no_disturb = serializers.BooleanField(required=False)
    apns_voice = serializers.BooleanField(required=False)
class InformationSerializer(serializers.ModelSerializer):
    """Client platform/version report stored on the seller settings profile."""
    class Meta:
        model = MyUserSellerSettingProfile
        fields = ('platform', 'version')
class FlyerFilterSerializer(serializers.Serializer):
    """Optional filters for flyer list queries."""
    name = serializers.CharField(required=False)
    shop_type = serializers.IntegerField(required=False)
    type = serializers.IntegerField(required=False)
| [
"zhengyufei19999@163.com"
] | zhengyufei19999@163.com |
a0a845781242f0d615c2c12534ebf5759c6cfebc | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/tools/third_party/pytest/testing/test_terminal.py | 80f7d558b4c4e1e57bb77e65cd89fec3ebf68c1f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 81,311 | py | """Terminal reporting of the full testing process."""
import collections
import os
import sys
import textwrap
from io import StringIO
from pathlib import Path
from types import SimpleNamespace
from typing import cast
from typing import Dict
from typing import List
from typing import Tuple
import pluggy
import _pytest.config
import _pytest.terminal
import pytest
from _pytest._io.wcwidth import wcswidth
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester
from _pytest.reports import BaseReport
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
from _pytest.terminal import _folded_skips
from _pytest.terminal import _format_trimmed
from _pytest.terminal import _get_line_with_reprcrash_message
from _pytest.terminal import _get_raw_skip_reason
from _pytest.terminal import _plugin_nameversions
from _pytest.terminal import getreportopt
from _pytest.terminal import TerminalReporter
# Lightweight stand-in for an installed distribution in plugin-version tests.
DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"])
# Escapes fnmatch metacharacters '[' and ']' so literal brackets match.
TRANS_FNMATCH = str.maketrans({"[": "[[]", "]": "[]]"})
class Option:
    """Bundle of command-line arguments parametrising verbosity in tests."""

    def __init__(self, verbosity=0):
        self.verbosity = verbosity

    @property
    def args(self):
        # Render the stored verbosity as the CLI flag pytest expects.
        return ["--verbosity=%d" % self.verbosity]
@pytest.fixture(
    params=[Option(verbosity=0), Option(verbosity=1), Option(verbosity=-1)],
    ids=["default", "verbose", "quiet"],
)
def option(request):
    # Parametrized fixture: runs each consuming test at three verbosity levels.
    return request.param
@pytest.mark.parametrize(
    "input,expected",
    [
        ([DistInfo(project_name="test", version=1)], ["test-1"]),
        ([DistInfo(project_name="pytest-test", version=1)], ["test-1"]),
        (
            [
                DistInfo(project_name="test", version=1),
                DistInfo(project_name="test", version=1),
            ],
            ["test-1"],
        ),
    ],
    ids=["normal", "prefix-strip", "deduplicate"],
)
def test_plugin_nameversion(input, expected):
    # _plugin_nameversions takes (plugin, dist) pairs; the plugin is unused here.
    pluginlist = [(None, x) for x in input]
    result = _plugin_nameversions(pluginlist)
    assert result == expected
class TestTerminal:
    """Terminal reporter behaviour: per-test progress output, verbosity
    levels, collection progress, interrupts and skip/xfail reasons."""

    def test_pass_skip_fail(self, pytester: Pytester, option) -> None:
        pytester.makepyfile(
            """
            import pytest
            def test_ok():
                pass
            def test_skip():
                pytest.skip("xx")
            def test_func():
                assert 0
        """
        )
        result = pytester.runpytest(*option.args)
        if option.verbosity > 0:
            result.stdout.fnmatch_lines(
                [
                    "*test_pass_skip_fail.py::test_ok PASS*",
                    "*test_pass_skip_fail.py::test_skip SKIP*",
                    "*test_pass_skip_fail.py::test_func FAIL*",
                ]
            )
        elif option.verbosity == 0:
            result.stdout.fnmatch_lines(["*test_pass_skip_fail.py .sF*"])
        else:
            result.stdout.fnmatch_lines([".sF*"])
        result.stdout.fnmatch_lines(
            ["    def test_func():", ">       assert 0", "E       assert 0"]
        )

    def test_internalerror(self, pytester: Pytester, linecomp) -> None:
        modcol = pytester.getmodulecol("def test_one(): pass")
        rep = TerminalReporter(modcol.config, file=linecomp.stringio)
        with pytest.raises(ValueError) as excinfo:
            raise ValueError("hello")
        rep.pytest_internalerror(excinfo.getrepr())
        linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"])

    def test_writeline(self, pytester: Pytester, linecomp) -> None:
        modcol = pytester.getmodulecol("def test_one(): pass")
        rep = TerminalReporter(modcol.config, file=linecomp.stringio)
        rep.write_fspath_result(modcol.nodeid, ".")
        rep.write_line("hello world")
        lines = linecomp.stringio.getvalue().split("\n")
        assert not lines[0]
        assert lines[1].endswith(modcol.name + " .")
        assert lines[2] == "hello world"

    def test_show_runtest_logstart(self, pytester: Pytester, linecomp) -> None:
        item = pytester.getitem("def test_func(): pass")
        tr = TerminalReporter(item.config, file=linecomp.stringio)
        item.config.pluginmanager.register(tr)
        location = item.reportinfo()
        tr.config.hook.pytest_runtest_logstart(
            nodeid=item.nodeid, location=location, fspath=str(item.path)
        )
        linecomp.assert_contains_lines(["*test_show_runtest_logstart.py*"])

    def test_runtest_location_shown_before_test_starts(
        self, pytester: Pytester
    ) -> None:
        pytester.makepyfile(
            """
            def test_1():
                import time

                time.sleep(20)
        """
        )
        child = pytester.spawn_pytest("")
        child.expect(".*test_runtest_location.*py")
        child.sendeof()
        child.kill(15)

    def test_report_collect_after_half_a_second(
        self, pytester: Pytester, monkeypatch: MonkeyPatch
    ) -> None:
        """Test for "collecting" being updated after 0.5s"""

        pytester.makepyfile(
            **{
                "test1.py": """
                import _pytest.terminal

                _pytest.terminal.REPORT_COLLECTING_RESOLUTION = 0

                def test_1():
                    pass
                    """,
                "test2.py": "def test_2(): pass",
            }
        )
        # Explicitly test colored output.
        monkeypatch.setenv("PY_COLORS", "1")

        child = pytester.spawn_pytest("-v test1.py test2.py")
        child.expect(r"collecting \.\.\.")
        child.expect(r"collecting 1 item")
        child.expect(r"collecting 2 items")
        child.expect(r"collected 2 items")
        rest = child.read().decode("utf8")
        assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest

    def test_itemreport_subclasses_show_subclassed_file(
        self, pytester: Pytester
    ) -> None:
        pytester.makepyfile(
            **{
                "tests/test_p1": """
            class BaseTests(object):
                fail = False

                def test_p1(self):
                    if self.fail: assert 0
                """,
                "tests/test_p2": """
            from test_p1 import BaseTests

            class TestMore(BaseTests): pass
            """,
                "tests/test_p3.py": """
            from test_p1 import BaseTests

            BaseTests.fail = True

            class TestMore(BaseTests): pass
            """,
            }
        )
        result = pytester.runpytest("tests/test_p2.py", "--rootdir=tests")
        result.stdout.fnmatch_lines(["tests/test_p2.py .*", "=* 1 passed in *"])

        result = pytester.runpytest("-vv", "-rA", "tests/test_p2.py", "--rootdir=tests")
        result.stdout.fnmatch_lines(
            [
                "tests/test_p2.py::TestMore::test_p1 <- test_p1.py PASSED *",
                "*= short test summary info =*",
                "PASSED tests/test_p2.py::TestMore::test_p1",
            ]
        )
        result = pytester.runpytest("-vv", "-rA", "tests/test_p3.py", "--rootdir=tests")
        result.stdout.fnmatch_lines(
            [
                "tests/test_p3.py::TestMore::test_p1 <- test_p1.py FAILED *",
                "*_ TestMore.test_p1 _*",
                "    def test_p1(self):",
                ">       if self.fail: assert 0",
                "E       assert 0",
                "",
                "tests/test_p1.py:5: AssertionError",
                "*= short test summary info =*",
                "FAILED tests/test_p3.py::TestMore::test_p1 - assert 0",
                "*= 1 failed in *",
            ]
        )

    def test_itemreport_directclasses_not_shown_as_subclasses(
        self, pytester: Pytester
    ) -> None:
        a = pytester.mkpydir("a123")
        a.joinpath("test_hello123.py").write_text(
            textwrap.dedent(
                """\
                class TestClass(object):
                    def test_method(self):
                        pass
                """
            )
        )
        result = pytester.runpytest("-vv")
        assert result.ret == 0
        result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"])
        result.stdout.no_fnmatch_line("* <- *")

    @pytest.mark.parametrize("fulltrace", ("", "--fulltrace"))
    def test_keyboard_interrupt(self, pytester: Pytester, fulltrace) -> None:
        pytester.makepyfile(
            """
            def test_foobar():
                assert 0
            def test_spamegg():
                import py; pytest.skip('skip me please!')
            def test_interrupt_me():
                raise KeyboardInterrupt   # simulating the user
        """
        )

        result = pytester.runpytest(fulltrace, no_reraise_ctrlc=True)
        result.stdout.fnmatch_lines(
            [
                "    def test_foobar():",
                ">       assert 0",
                "E       assert 0",
                "*_keyboard_interrupt.py:6: KeyboardInterrupt*",
            ]
        )
        if fulltrace:
            result.stdout.fnmatch_lines(
                ["*raise KeyboardInterrupt   # simulating the user*"]
            )
        else:
            result.stdout.fnmatch_lines(
                ["(to show a full traceback on KeyboardInterrupt use --full-trace)"]
            )
        result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])

    def test_keyboard_in_sessionstart(self, pytester: Pytester) -> None:
        pytester.makeconftest(
            """
            def pytest_sessionstart():
                raise KeyboardInterrupt
        """
        )
        pytester.makepyfile(
            """
            def test_foobar():
                pass
        """
        )

        result = pytester.runpytest(no_reraise_ctrlc=True)
        assert result.ret == 2
        result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])

    def test_collect_single_item(self, pytester: Pytester) -> None:
        """Use singular 'item' when reporting a single test item"""
        pytester.makepyfile(
            """
            def test_foobar():
                pass
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["collected 1 item"])

    def test_rewrite(self, pytester: Pytester, monkeypatch) -> None:
        config = pytester.parseconfig()
        f = StringIO()
        monkeypatch.setattr(f, "isatty", lambda *args: True)
        tr = TerminalReporter(config, f)
        tr._tw.fullwidth = 10
        tr.write("hello")
        tr.rewrite("hey", erase=True)
        assert f.getvalue() == "hello" + "\r" + "hey" + (6 * " ")

    def test_report_teststatus_explicit_markup(
        self, monkeypatch: MonkeyPatch, pytester: Pytester, color_mapping
    ) -> None:
        """Test that TerminalReporter handles markup explicitly provided by
        a pytest_report_teststatus hook."""
        monkeypatch.setenv("PY_COLORS", "1")
        pytester.makeconftest(
            """
            def pytest_report_teststatus(report):
                return 'foo', 'F', ('FOO', {'red': True})
        """
        )
        pytester.makepyfile(
            """
            def test_foobar():
                pass
        """
        )
        result = pytester.runpytest("-v")
        result.stdout.fnmatch_lines(
            color_mapping.format_for_fnmatch(["*{red}FOO{reset}*"])
        )

    def test_verbose_skip_reason(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            """
            import pytest

            @pytest.mark.skip(reason="123")
            def test_1():
                pass

            @pytest.mark.xfail(reason="456")
            def test_2():
                pass

            @pytest.mark.xfail(reason="789")
            def test_3():
                assert False

            @pytest.mark.xfail(reason="")
            def test_4():
                assert False

            @pytest.mark.skip
            def test_5():
                pass

            @pytest.mark.xfail
            def test_6():
                pass

            def test_7():
                pytest.skip()

            def test_8():
                pytest.skip("888 is great")

            def test_9():
                pytest.xfail()

            def test_10():
                pytest.xfail("It's 🕙 o'clock")
        """
        )
        result = pytester.runpytest("-v")
        result.stdout.fnmatch_lines(
            [
                "test_verbose_skip_reason.py::test_1 SKIPPED (123) *",
                "test_verbose_skip_reason.py::test_2 XPASS (456) *",
                "test_verbose_skip_reason.py::test_3 XFAIL (789) *",
                "test_verbose_skip_reason.py::test_4 XFAIL *",
                "test_verbose_skip_reason.py::test_5 SKIPPED (unconditional skip) *",
                "test_verbose_skip_reason.py::test_6 XPASS *",
                "test_verbose_skip_reason.py::test_7 SKIPPED *",
                "test_verbose_skip_reason.py::test_8 SKIPPED (888 is great) *",
                "test_verbose_skip_reason.py::test_9 XFAIL *",
                "test_verbose_skip_reason.py::test_10 XFAIL (It's 🕙 o'clock) *",
            ]
        )
class TestCollectonly:
    """Terminal output produced by the ``--collect-only`` mode at various
    verbosity levels, including error and summary handling."""
    def test_collectonly_basic(self, pytester: Pytester) -> None:
        # Default collect-only output is a tree of Module/Function nodes.
        pytester.makepyfile(
            """
            def test_func():
                pass
            """
        )
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(
            ["<Module test_collectonly_basic.py>", " <Function test_func>"]
        )
    def test_collectonly_skipped_module(self, pytester: Pytester) -> None:
        # Module-level pytest.skip() without allow_module_level is reported
        # as a collection error, not a skip.
        pytester.makepyfile(
            """
            import pytest
            pytest.skip("hello")
            """
        )
        result = pytester.runpytest("--collect-only", "-rs")
        result.stdout.fnmatch_lines(["*ERROR collecting*"])
    def test_collectonly_displays_test_description(
        self, pytester: Pytester, dummy_yaml_custom_test
    ) -> None:
        """Used dummy_yaml_custom_test for an Item without ``obj``."""
        pytester.makepyfile(
            """
            def test_with_description():
                ''' This test has a description.
                more1.
                more2.'''
            """
        )
        result = pytester.runpytest("--collect-only", "--verbose")
        # consecutive=True: the docstring lines must appear in this exact order.
        result.stdout.fnmatch_lines(
            [
                "<YamlFile test1.yaml>",
                " <YamlItem test1.yaml>",
                "<Module test_collectonly_displays_test_description.py>",
                " <Function test_with_description>",
                " This test has a description.",
                " ",
                " more1.",
                " more2.",
            ],
            consecutive=True,
        )
    def test_collectonly_failed_module(self, pytester: Pytester) -> None:
        pytester.makepyfile("""raise ValueError(0)""")
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(["*raise ValueError*", "*1 error*"])
    def test_collectonly_fatal(self, pytester: Pytester) -> None:
        # An assertion failing inside pytest_collectstart is an internal
        # error -> exit code 3.
        pytester.makeconftest(
            """
            def pytest_collectstart(collector):
                assert 0, "urgs"
            """
        )
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(["*INTERNAL*args*"])
        assert result.ret == 3
    def test_collectonly_simple(self, pytester: Pytester) -> None:
        p = pytester.makepyfile(
            """
            def test_func1():
                pass
            class TestClass(object):
                def test_method(self):
                    pass
            """
        )
        result = pytester.runpytest("--collect-only", p)
        assert result.ret == 0
        result.stdout.fnmatch_lines(
            [
                "*<Module *.py>",
                "* <Function test_func1>",
                "* <Class TestClass>",
                "* <Function test_method>",
            ]
        )
    def test_collectonly_error(self, pytester: Pytester) -> None:
        # Unimportable test module -> collection error, exit code 2.
        p = pytester.makepyfile("import Errlkjqweqwe")
        result = pytester.runpytest("--collect-only", p)
        assert result.ret == 2
        result.stdout.fnmatch_lines(
            textwrap.dedent(
                """\
                *ERROR*
                *ImportError*
                *No module named *Errlk*
                *1 error*
                """
            ).strip()
        )
    def test_collectonly_missing_path(self, pytester: Pytester) -> None:
        """Issue 115: failure in parseargs will cause session not to
        have the items attribute."""
        result = pytester.runpytest("--collect-only", "uhm_missing_path")
        assert result.ret == 4
        result.stderr.fnmatch_lines(
            ["*ERROR: file or directory not found: uhm_missing_path"]
        )
    def test_collectonly_quiet(self, pytester: Pytester) -> None:
        pytester.makepyfile("def test_foo(): pass")
        result = pytester.runpytest("--collect-only", "-q")
        result.stdout.fnmatch_lines(["*test_foo*"])
    def test_collectonly_more_quiet(self, pytester: Pytester) -> None:
        # -qq collapses output to per-file test counts.
        pytester.makepyfile(test_fun="def test_foo(): pass")
        result = pytester.runpytest("--collect-only", "-qq")
        result.stdout.fnmatch_lines(["*test_fun.py: 1*"])
    def test_collect_only_summary_status(self, pytester: Pytester) -> None:
        """Custom status depending on test selection using -k or -m. #7701."""
        pytester.makepyfile(
            test_collect_foo="""
            def test_foo(): pass
            """,
            test_collect_bar="""
            def test_foobar(): pass
            def test_bar(): pass
            """,
        )
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines("*== 3 tests collected in * ==*")
        result = pytester.runpytest("--collect-only", "test_collect_foo.py")
        result.stdout.fnmatch_lines("*== 1 test collected in * ==*")
        result = pytester.runpytest("--collect-only", "-k", "foo")
        result.stdout.fnmatch_lines("*== 2/3 tests collected (1 deselected) in * ==*")
        result = pytester.runpytest("--collect-only", "-k", "test_bar")
        result.stdout.fnmatch_lines("*== 1/3 tests collected (2 deselected) in * ==*")
        result = pytester.runpytest("--collect-only", "-k", "invalid")
        result.stdout.fnmatch_lines("*== no tests collected (3 deselected) in * ==*")
        pytester.mkdir("no_tests_here")
        result = pytester.runpytest("--collect-only", "no_tests_here")
        result.stdout.fnmatch_lines("*== no tests collected in * ==*")
        # A module raising at import time adds ", 1 error" to the summary.
        pytester.makepyfile(
            test_contains_error="""
            raise RuntimeError
            """,
        )
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines("*== 3 tests collected, 1 error in * ==*")
        result = pytester.runpytest("--collect-only", "-k", "foo")
        result.stdout.fnmatch_lines(
            "*== 2/3 tests collected (1 deselected), 1 error in * ==*"
        )
class TestFixtureReporting:
    """Reporting of errors raised in xunit-style setup/teardown functions."""
    def test_setup_fixture_error(self, pytester: Pytester) -> None:
        # A failing setup_function is reported as ERROR, not FAILED.
        pytester.makepyfile(
            """
            def setup_function(function):
                print("setup func")
                assert 0
            def test_nada():
                pass
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at setup of test_nada*",
                "*setup_function(function):*",
                "*setup func*",
                "*assert 0*",
                "*1 error*",
            ]
        )
        assert result.ret != 0
    def test_teardown_fixture_error(self, pytester: Pytester) -> None:
        # The test itself passes; the teardown failure is a separate ERROR.
        pytester.makepyfile(
            """
            def test_nada():
                pass
            def teardown_function(function):
                print("teardown func")
                assert 0
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at teardown*",
                "*teardown_function(function):*",
                "*assert 0*",
                "*Captured stdout*",
                "*teardown func*",
                "*1 passed*1 error*",
            ]
        )
    def test_teardown_fixture_error_and_test_failure(self, pytester: Pytester) -> None:
        # Both the teardown ERROR and the test FAILURE must be reported.
        pytester.makepyfile(
            """
            def test_fail():
                assert 0, "failingfunc"
            def teardown_function(function):
                print("teardown func")
                assert False
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*ERROR at teardown of test_fail*",
                "*teardown_function(function):*",
                "*assert False*",
                "*Captured stdout*",
                "*teardown func*",
                "*test_fail*",
                "*def test_fail():",
                "*failingfunc*",
                "*1 failed*1 error*",
            ]
        )
    def test_setup_teardown_output_and_test_failure(self, pytester: Pytester) -> None:
        """Test for issue #442."""
        pytester.makepyfile(
            """
            def setup_function(function):
                print("setup func")
            def test_fail():
                assert 0, "failingfunc"
            def teardown_function(function):
                print("teardown func")
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*test_fail*",
                "*def test_fail():",
                "*failingfunc*",
                "*Captured stdout setup*",
                "*setup func*",
                "*Captured stdout teardown*",
                "*teardown func*",
                "*1 failed*",
            ]
        )
class TestTerminalFunctional:
    """End-to-end checks of whole terminal sessions: header/trailer lines,
    deselection counts, verbosity levels and report-char summaries."""
    def test_deselected(self, pytester: Pytester) -> None:
        testpath = pytester.makepyfile(
            """
            def test_one():
                pass
            def test_two():
                pass
            def test_three():
                pass
            """
        )
        # "test_two:" uses the deprecated -k trailing-colon syntax, hence the
        # -W ignore for PytestRemovedIn7Warning.
        result = pytester.runpytest(
            "-Wignore::pytest.PytestRemovedIn7Warning", "-k", "test_two:", testpath
        )
        result.stdout.fnmatch_lines(
            ["collected 3 items / 1 deselected / 2 selected", "*test_deselected.py ..*"]
        )
        assert result.ret == 0
    def test_deselected_with_hookwrapper(self, pytester: Pytester) -> None:
        # Deselection done from a hookwrapper must still show up in the counts.
        pytester.makeconftest(
            """
            import pytest
            @pytest.hookimpl(hookwrapper=True)
            def pytest_collection_modifyitems(config, items):
                yield
                deselected = items.pop()
                config.hook.pytest_deselected(items=[deselected])
            """
        )
        testpath = pytester.makepyfile(
            """
            def test_one():
                pass
            def test_two():
                pass
            def test_three():
                pass
            """
        )
        result = pytester.runpytest(testpath)
        result.stdout.fnmatch_lines(
            [
                "collected 3 items / 1 deselected / 2 selected",
                "*= 2 passed, 1 deselected in*",
            ]
        )
        assert result.ret == 0
    def test_show_deselected_items_using_markexpr_before_test_execution(
        self, pytester: Pytester
    ) -> None:
        pytester.makepyfile(
            test_show_deselected="""
            import pytest
            @pytest.mark.foo
            def test_foobar():
                pass
            @pytest.mark.bar
            def test_bar():
                pass
            def test_pass():
                pass
            """
        )
        result = pytester.runpytest("-m", "not foo")
        result.stdout.fnmatch_lines(
            [
                "collected 3 items / 1 deselected / 2 selected",
                "*test_show_deselected.py ..*",
                "*= 2 passed, 1 deselected in * =*",
            ]
        )
        result.stdout.no_fnmatch_line("*= 1 deselected =*")
        assert result.ret == 0
    def test_no_skip_summary_if_failure(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            """
            import pytest
            def test_ok():
                pass
            def test_fail():
                assert 0
            def test_skip():
                pytest.skip("dontshow")
            """
        )
        result = pytester.runpytest()
        assert result.stdout.str().find("skip test summary") == -1
        assert result.ret == 1
    def test_passes(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile(
            """
            def test_passes():
                pass
            class TestClass(object):
                def test_method(self):
                    pass
            """
        )
        # Run from inside the test dir, restoring the cwd afterwards.
        old = p1.parent
        pytester.chdir()
        try:
            result = pytester.runpytest()
        finally:
            os.chdir(old)
        result.stdout.fnmatch_lines(["test_passes.py ..*", "* 2 pass*"])
        assert result.ret == 0
    def test_header_trailer_info(
        self, monkeypatch: MonkeyPatch, pytester: Pytester, request
    ) -> None:
        # NOTE(review): no raising=False -- assumes PYTEST_DISABLE_PLUGIN_AUTOLOAD
        # is set in the invoking environment; confirm against the CI setup.
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
        pytester.makepyfile(
            """
            def test_passes():
                pass
            """
        )
        result = pytester.runpytest()
        verinfo = ".".join(map(str, sys.version_info[:3]))
        result.stdout.fnmatch_lines(
            [
                "*===== test session starts ====*",
                "platform %s -- Python %s*pytest-%s**pluggy-%s"
                % (
                    sys.platform,
                    verinfo,
                    pytest.__version__,
                    pluggy.__version__,
                ),
                "*test_header_trailer_info.py .*",
                "=* 1 passed*in *.[0-9][0-9]s *=",
            ]
        )
        if request.config.pluginmanager.list_plugin_distinfo():
            result.stdout.fnmatch_lines(["plugins: *"])
    def test_no_header_trailer_info(
        self, monkeypatch: MonkeyPatch, pytester: Pytester, request
    ) -> None:
        # NOTE(review): same delenv assumption as test_header_trailer_info.
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
        pytester.makepyfile(
            """
            def test_passes():
                pass
            """
        )
        result = pytester.runpytest("--no-header")
        verinfo = ".".join(map(str, sys.version_info[:3]))
        result.stdout.no_fnmatch_line(
            "platform %s -- Python %s*pytest-%s**pluggy-%s"
            % (
                sys.platform,
                verinfo,
                pytest.__version__,
                pluggy.__version__,
            )
        )
        if request.config.pluginmanager.list_plugin_distinfo():
            result.stdout.no_fnmatch_line("plugins: *")
    def test_header(self, pytester: Pytester) -> None:
        # rootdir line grows configfile/testpaths parts as they are configured.
        pytester.path.joinpath("tests").mkdir()
        pytester.path.joinpath("gui").mkdir()
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["rootdir: *test_header0"])
        pytester.makeini("""[pytest]""")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["rootdir: *test_header0, configfile: tox.ini"])
        pytester.makeini(
            """
            [pytest]
            testpaths = tests gui
            """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            ["rootdir: *test_header0, configfile: tox.ini, testpaths: tests, gui"]
        )
        # Explicit args on the command line suppress the testpaths display.
        result = pytester.runpytest("tests")
        result.stdout.fnmatch_lines(["rootdir: *test_header0, configfile: tox.ini"])
    def test_header_absolute_testpath(
        self, pytester: Pytester, monkeypatch: MonkeyPatch
    ) -> None:
        """Regresstion test for #7814."""
        tests = pytester.path.joinpath("tests")
        tests.mkdir()
        pytester.makepyprojecttoml(
            """
            [tool.pytest.ini_options]
            testpaths = ['{}']
            """.format(
                tests
            )
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "rootdir: *absolute_testpath0, configfile: pyproject.toml, testpaths: {}".format(
                    tests
                )
            ]
        )
    def test_no_header(self, pytester: Pytester) -> None:
        pytester.path.joinpath("tests").mkdir()
        pytester.path.joinpath("gui").mkdir()
        pytester.makeini(
            """
            [pytest]
            testpaths = tests gui
            """
        )
        result = pytester.runpytest("--no-header")
        result.stdout.no_fnmatch_line(
            "rootdir: *test_header0, inifile: tox.ini, testpaths: tests, gui"
        )
        result = pytester.runpytest("tests", "--no-header")
        result.stdout.no_fnmatch_line("rootdir: *test_header0, inifile: tox.ini")
    def test_no_summary(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile(
            """
            def test_no_summary():
                assert false
            """
        )
        # "false" raises NameError, which still fails the inner test -- all
        # this check needs is a failure with --no-summary.
        result = pytester.runpytest(p1, "--no-summary")
        result.stdout.no_fnmatch_line("*= FAILURES =*")
    def test_showlocals(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile(
            """
            def test_showlocals():
                x = 3
                y = "x" * 5000
                assert 0
            """
        )
        result = pytester.runpytest(p1, "-l")
        result.stdout.fnmatch_lines(
            [
                "x* = 3",
                "y* = 'xxxxxx*",
            ]
        )
    def test_showlocals_short(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile(
            """
            def test_showlocals_short():
                x = 3
                y = "xxxx"
                assert 0
            """
        )
        result = pytester.runpytest(p1, "-l", "--tb=short")
        result.stdout.fnmatch_lines(
            [
                "test_showlocals_short.py:*",
                " assert 0",
                "E assert 0",
                " x = 3",
                " y = 'xxxx'",
            ]
        )
    @pytest.fixture
    def verbose_testfile(self, pytester: Pytester) -> Path:
        # One test per outcome: fail, pass, skip, plus a legacy yield test.
        return pytester.makepyfile(
            """
            import pytest
            def test_fail():
                raise ValueError()
            def test_pass():
                pass
            class TestClass(object):
                def test_skip(self):
                    pytest.skip("hello")
            def test_gen():
                def check(x):
                    assert x == 1
                yield check, 0
            """
        )
    def test_verbose_reporting(self, verbose_testfile, pytester: Pytester) -> None:
        result = pytester.runpytest(
            verbose_testfile, "-v", "-Walways::pytest.PytestWarning"
        )
        result.stdout.fnmatch_lines(
            [
                "*test_verbose_reporting.py::test_fail *FAIL*",
                "*test_verbose_reporting.py::test_pass *PASS*",
                "*test_verbose_reporting.py::TestClass::test_skip *SKIP*",
                "*test_verbose_reporting.py::test_gen *XFAIL*",
            ]
        )
        assert result.ret == 1
    def test_verbose_reporting_xdist(
        self,
        verbose_testfile,
        monkeypatch: MonkeyPatch,
        pytester: Pytester,
        pytestconfig,
    ) -> None:
        if not pytestconfig.pluginmanager.get_plugin("xdist"):
            pytest.skip("xdist plugin not installed")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
        result = pytester.runpytest(
            verbose_testfile, "-v", "-n 1", "-Walways::pytest.PytestWarning"
        )
        result.stdout.fnmatch_lines(
            ["*FAIL*test_verbose_reporting_xdist.py::test_fail*"]
        )
        assert result.ret == 1
    def test_quiet_reporting(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile("def test_pass(): pass")
        result = pytester.runpytest(p1, "-q")
        s = result.stdout.str()
        assert "test session starts" not in s
        assert p1.name not in s
        assert "===" not in s
        assert "passed" in s
    def test_more_quiet_reporting(self, pytester: Pytester) -> None:
        p1 = pytester.makepyfile("def test_pass(): pass")
        result = pytester.runpytest(p1, "-qq")
        s = result.stdout.str()
        assert "test session starts" not in s
        assert p1.name not in s
        assert "===" not in s
        assert "passed" not in s
    @pytest.mark.parametrize(
        "params", [(), ("--collect-only",)], ids=["no-params", "collect-only"]
    )
    def test_report_collectionfinish_hook(self, pytester: Pytester, params) -> None:
        pytester.makeconftest(
            """
            def pytest_report_collectionfinish(config, start_path, items):
                return [f'hello from hook: {len(items)} items']
            """
        )
        pytester.makepyfile(
            """
            import pytest
            @pytest.mark.parametrize('i', range(3))
            def test(i):
                pass
            """
        )
        result = pytester.runpytest(*params)
        result.stdout.fnmatch_lines(["collected 3 items", "hello from hook: 3 items"])
    def test_summary_f_alias(self, pytester: Pytester) -> None:
        """Test that 'f' and 'F' report chars are aliases and don't show up twice in the summary (#6334)"""
        pytester.makepyfile(
            """
            def test():
                assert False
            """
        )
        result = pytester.runpytest("-rfF")
        expected = "FAILED test_summary_f_alias.py::test - assert False"
        result.stdout.fnmatch_lines([expected])
        assert result.stdout.lines.count(expected) == 1
    def test_summary_s_alias(self, pytester: Pytester) -> None:
        """Test that 's' and 'S' report chars are aliases and don't show up twice in the summary"""
        pytester.makepyfile(
            """
            import pytest
            @pytest.mark.skip
            def test():
                pass
            """
        )
        result = pytester.runpytest("-rsS")
        expected = "SKIPPED [1] test_summary_s_alias.py:3: unconditional skip"
        result.stdout.fnmatch_lines([expected])
        assert result.stdout.lines.count(expected) == 1
def test_fail_extra_reporting(pytester: Pytester, monkeypatch) -> None:
    """-rN suppresses the short summary; a default run shows the FAILED line
    truncated to the terminal width."""
    # Pin the terminal width so the truncation point is deterministic.
    monkeypatch.setenv("COLUMNS", "80")
    pytester.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
    suppressed = pytester.runpytest("-rN")
    suppressed.stdout.no_fnmatch_line("*short test summary*")
    default_run = pytester.runpytest()
    default_run.stdout.fnmatch_lines(
        [
            "*test summary*",
            "FAILED test_fail_extra_reporting.py::test_this - AssertionError: this_failedt...",
        ]
    )
def test_fail_reporting_on_pass(pytester: Pytester) -> None:
    """-rf produces no short summary section when every test passes."""
    pytester.makepyfile("def test_this(): assert 1")
    res = pytester.runpytest("-rf")
    res.stdout.no_fnmatch_line("*short test summary*")
def test_pass_extra_reporting(pytester: Pytester) -> None:
    """PASSED lines appear in the short summary only when -rp is given."""
    pytester.makepyfile("def test_this(): assert 1")
    default_run = pytester.runpytest()
    default_run.stdout.no_fnmatch_line("*short test summary*")
    with_rp = pytester.runpytest("-rp")
    with_rp.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"])
def test_pass_reporting_on_fail(pytester: Pytester) -> None:
    """-rp (passed-only summary) yields no summary when the only outcome is a failure."""
    pytester.makepyfile("def test_this(): assert 0")
    res = pytester.runpytest("-rp")
    res.stdout.no_fnmatch_line("*short test summary*")
def test_pass_output_reporting(pytester: Pytester) -> None:
    """Captured output of passing tests is hidden by default and shown in a
    PASSES section with -rP; -rp adds the short-summary PASSED lines."""
    pytester.makepyfile(
        """
        def setup_module():
            print("setup_module")
        def teardown_module():
            print("teardown_module")
        def test_pass_has_output():
            print("Four score and seven years ago...")
        def test_pass_no_output():
            pass
        """
    )
    result = pytester.runpytest()
    s = result.stdout.str()
    assert "test_pass_has_output" not in s
    assert "Four score and seven years ago..." not in s
    assert "test_pass_no_output" not in s
    result = pytester.runpytest("-rPp")
    result.stdout.fnmatch_lines(
        [
            "*= PASSES =*",
            "*_ test_pass_has_output _*",
            "*- Captured stdout setup -*",
            "setup_module",
            "*- Captured stdout call -*",
            "Four score and seven years ago...",
            "*- Captured stdout teardown -*",
            "teardown_module",
            "*= short test summary info =*",
            "PASSED test_pass_output_reporting.py::test_pass_has_output",
            "PASSED test_pass_output_reporting.py::test_pass_no_output",
            "*= 2 passed in *",
        ]
    )
def test_color_yes(pytester: Pytester, color_mapping) -> None:
    """Full-session colorized output for --tb=long (default) and --tb=short;
    color_mapping expands {red}/{bold}/... placeholders into ANSI codes."""
    p1 = pytester.makepyfile(
        """
        def fail():
            assert 0
        def test_this():
            fail()
        """
    )
    result = pytester.runpytest("--color=yes", str(p1))
    result.stdout.fnmatch_lines(
        color_mapping.format_for_fnmatch(
            [
                "{bold}=*= test session starts =*={reset}",
                "collected 1 item",
                "",
                "test_color_yes.py {red}F{reset}{red} * [100%]{reset}",
                "",
                "=*= FAILURES =*=",
                "{red}{bold}_*_ test_this _*_{reset}",
                "",
                " {kw}def{hl-reset} {function}test_this{hl-reset}():",
                "> fail()",
                "",
                "{bold}{red}test_color_yes.py{reset}:5: ",
                "_ _ * _ _*",
                "",
                " {kw}def{hl-reset} {function}fail{hl-reset}():",
                "> {kw}assert{hl-reset} {number}0{hl-reset}",
                "{bold}{red}E assert 0{reset}",
                "",
                "{bold}{red}test_color_yes.py{reset}:2: AssertionError",
                "{red}=*= {red}{bold}1 failed{reset}{red} in *s{reset}{red} =*={reset}",
            ]
        )
    )
    result = pytester.runpytest("--color=yes", "--tb=short", str(p1))
    result.stdout.fnmatch_lines(
        color_mapping.format_for_fnmatch(
            [
                "{bold}=*= test session starts =*={reset}",
                "collected 1 item",
                "",
                "test_color_yes.py {red}F{reset}{red} * [100%]{reset}",
                "",
                "=*= FAILURES =*=",
                "{red}{bold}_*_ test_this _*_{reset}",
                "{bold}{red}test_color_yes.py{reset}:5: in test_this",
                " fail()",
                "{bold}{red}test_color_yes.py{reset}:2: in fail",
                " {kw}assert{hl-reset} {number}0{hl-reset}",
                "{bold}{red}E assert 0{reset}",
                "{red}=*= {red}{bold}1 failed{reset}{red} in *s{reset}{red} =*={reset}",
            ]
        )
    )
def test_color_no(pytester: Pytester) -> None:
    """--color=no must emit no ANSI escape sequences at all."""
    pytester.makepyfile("def test_this(): assert 1")
    res = pytester.runpytest("--color=no")
    assert "test session starts" in res.stdout.str()
    res.stdout.no_fnmatch_line("*\x1b[1m*")
@pytest.mark.parametrize("verbose", [True, False])
def test_color_yes_collection_on_non_atty(pytester: Pytester, verbose) -> None:
    """#1397: Skip collect progress report when working on non-terminals."""
    pytester.makepyfile(
        """
        import pytest
        @pytest.mark.parametrize('i', range(10))
        def test_this(i):
            assert 1
        """
    )
    # -vv switches collection output from the progress line to "collecting ...".
    cli_args = ["--color=yes", "-vv"] if verbose else ["--color=yes"]
    res = pytester.runpytest(*cli_args)
    out = res.stdout.str()
    assert "test session starts" in out
    assert "\x1b[1m" in out
    res.stdout.no_fnmatch_line("*collecting 10 items*")
    if verbose:
        assert "collecting ..." in out
    assert "collected 10 items" in out
def test_getreportopt() -> None:
    """getreportopt() normalization of -r report characters: the implicit
    warnings char 'w', the 'a'/'A' expansions, and the 'N' reset."""
    from _pytest.terminal import _REPORTCHARS_DEFAULT
    class FakeConfig:
        class Option:
            reportchars = _REPORTCHARS_DEFAULT
            disable_warnings = False
        option = Option()
    config = cast(Config, FakeConfig())
    assert _REPORTCHARS_DEFAULT == "fE"
    # 'w' is prepended unless already present or warnings are disabled.
    assert getreportopt(config) == "wfE"
    config.option.reportchars = "sf"
    assert getreportopt(config) == "wsf"
    config.option.reportchars = "sfxw"
    assert getreportopt(config) == "sfxw"
    # 'a' expands to all-but-passed; 'N' clears everything seen so far.
    config.option.reportchars = "a"
    assert getreportopt(config) == "wsxXEf"
    config.option.reportchars = "N"
    assert getreportopt(config) == "w"
    config.option.reportchars = "NwfE"
    assert getreportopt(config) == "wfE"
    config.option.reportchars = "NfENx"
    assert getreportopt(config) == "wx"
    # With warnings disabled the implicit 'w' disappears and any explicit
    # 'w' is dropped.
    config.option.disable_warnings = True
    config.option.reportchars = "a"
    assert getreportopt(config) == "sxXEf"
    config.option.reportchars = "sfx"
    assert getreportopt(config) == "sfx"
    config.option.reportchars = "sfxw"
    assert getreportopt(config) == "sfx"
    config.option.reportchars = "a"
    assert getreportopt(config) == "sxXEf"
    # 'A' additionally includes passed ('P'/'p').
    config.option.reportchars = "A"
    assert getreportopt(config) == "PpsxXEf"
    config.option.reportchars = "AN"
    assert getreportopt(config) == ""
    config.option.reportchars = "NwfE"
    assert getreportopt(config) == "fE"
def test_terminalreporter_reportopt_addopts(pytester: Pytester) -> None:
    """Report chars configured through addopts (-rs) are visible via hasopt()."""
    pytester.makeini("[pytest]\naddopts=-rs")
    pytester.makepyfile(
        """
        import pytest
        @pytest.fixture
        def tr(request):
            tr = request.config.pluginmanager.getplugin("terminalreporter")
            return tr
        def test_opt(tr):
            assert tr.hasopt('skipped')
            assert not tr.hasopt('qwe')
        """
    )
    res = pytester.runpytest()
    res.stdout.fnmatch_lines(["*1 passed*"])
def test_tbstyle_short(pytester: Pytester) -> None:
    """--tb=short omits local variables from the traceback; the default
    (long) style includes them."""
    p = pytester.makepyfile(
        """
        import pytest
        @pytest.fixture
        def arg(request):
            return 42
        def test_opt(arg):
            x = 0
            assert x
        """
    )
    short = pytester.runpytest("--tb=short")
    short_out = short.stdout.str()
    assert "arg = 42" not in short_out
    assert "x = 0" not in short_out
    short.stdout.fnmatch_lines(["*%s:8*" % p.name, " assert x", "E assert*"])
    full = pytester.runpytest()
    full_out = full.stdout.str()
    assert "x = 0" in full_out
    assert "assert x" in full_out
def test_traceconfig(pytester: Pytester) -> None:
    """--traceconfig lists the active plugins even when nothing is collected."""
    res = pytester.runpytest("--traceconfig")
    res.stdout.fnmatch_lines(["*active plugins*"])
    assert res.ret == ExitCode.NO_TESTS_COLLECTED
class TestGenericReporting:
    """Test class which can be subclassed with a different option provider to
    run e.g. distributed tests."""
    def test_collect_fail(self, pytester: Pytester, option) -> None:
        pytester.makepyfile("import xyz\n")
        result = pytester.runpytest(*option.args)
        result.stdout.fnmatch_lines(
            ["ImportError while importing*", "*No module named *xyz*", "*1 error*"]
        )
    def test_maxfailures(self, pytester: Pytester, option) -> None:
        # --maxfail=2 stops the run after the second failure.
        pytester.makepyfile(
            """
            def test_1():
                assert 0
            def test_2():
                assert 0
            def test_3():
                assert 0
            """
        )
        result = pytester.runpytest("--maxfail=2", *option.args)
        result.stdout.fnmatch_lines(
            [
                "*def test_1():*",
                "*def test_2():*",
                "*! stopping after 2 failures !*",
                "*2 failed*",
            ]
        )
    def test_maxfailures_with_interrupted(self, pytester: Pytester) -> None:
        # Both the maxfail message and the session.shouldstop reason must be
        # reported when they coincide.
        pytester.makepyfile(
            """
            def test(request):
                request.session.shouldstop = "session_interrupted"
                assert 0
            """
        )
        result = pytester.runpytest("--maxfail=1", "-ra")
        result.stdout.fnmatch_lines(
            [
                "*= short test summary info =*",
                "FAILED *",
                "*! stopping after 1 failures !*",
                "*! session_interrupted !*",
                "*= 1 failed in*",
            ]
        )
    def test_tb_option(self, pytester: Pytester, option) -> None:
        pytester.makepyfile(
            """
            import pytest
            def g():
                raise IndexError
            def test_func():
                print(6*7)
                g()  # --calling--
            """
        )
        # long shows source of all frames; short hides it; no hides the
        # whole FAILURES section.
        for tbopt in ["long", "short", "no"]:
            print("testing --tb=%s..." % tbopt)
            result = pytester.runpytest("-rN", "--tb=%s" % tbopt)
            s = result.stdout.str()
            if tbopt == "long":
                assert "print(6*7)" in s
            else:
                assert "print(6*7)" not in s
            if tbopt != "no":
                assert "--calling--" in s
                assert "IndexError" in s
            else:
                assert "FAILURES" not in s
                assert "--calling--" not in s
                assert "IndexError" not in s
    def test_tb_crashline(self, pytester: Pytester, option) -> None:
        # --tb=line collapses each failure to a single file:line message.
        p = pytester.makepyfile(
            """
            import pytest
            def g():
                raise IndexError
            def test_func1():
                print(6*7)
                g()  # --calling--
            def test_func2():
                assert 0, "hello"
            """
        )
        result = pytester.runpytest("--tb=line")
        bn = p.name
        result.stdout.fnmatch_lines(
            ["*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn]
        )
        s = result.stdout.str()
        assert "def test_func2" not in s
    def test_pytest_report_header(self, pytester: Pytester, option) -> None:
        # Header lines come from both the rootdir conftest and nested ones.
        pytester.makeconftest(
            """
            def pytest_sessionstart(session):
                session.config._somevalue = 42
            def pytest_report_header(config):
                return "hello: %s" % config._somevalue
            """
        )
        pytester.mkdir("a").joinpath("conftest.py").write_text(
            """
            def pytest_report_header(config, start_path):
                return ["line1", str(start_path)]
            """
        )
        result = pytester.runpytest("a")
        result.stdout.fnmatch_lines(["*hello: 42*", "line1", str(pytester.path)])
    def test_show_capture(self, pytester: Pytester) -> None:
        # --show-capture selects which captured streams appear in the report.
        pytester.makepyfile(
            """
            import sys
            import logging
            def test_one():
                sys.stdout.write('!This is stdout!')
                sys.stderr.write('!This is stderr!')
                logging.warning('!This is a warning log msg!')
                assert False, 'Something failed'
            """
        )
        result = pytester.runpytest("--tb=short")
        result.stdout.fnmatch_lines(
            [
                "!This is stdout!",
                "!This is stderr!",
                "*WARNING*!This is a warning log msg!",
            ]
        )
        result = pytester.runpytest("--show-capture=all", "--tb=short")
        result.stdout.fnmatch_lines(
            [
                "!This is stdout!",
                "!This is stderr!",
                "*WARNING*!This is a warning log msg!",
            ]
        )
        stdout = pytester.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
        assert "!This is stderr!" not in stdout
        assert "!This is stdout!" in stdout
        assert "!This is a warning log msg!" not in stdout
        stdout = pytester.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" in stdout
        assert "!This is a warning log msg!" not in stdout
        stdout = pytester.runpytest("--show-capture=log", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" not in stdout
        assert "!This is a warning log msg!" in stdout
        stdout = pytester.runpytest("--show-capture=no", "--tb=short").stdout.str()
        assert "!This is stdout!" not in stdout
        assert "!This is stderr!" not in stdout
        assert "!This is a warning log msg!" not in stdout
    def test_show_capture_with_teardown_logs(self, pytester: Pytester) -> None:
        """Ensure that the capturing of teardown logs honor --show-capture setting"""
        pytester.makepyfile(
            """
            import logging
            import sys
            import pytest
            @pytest.fixture(scope="function", autouse="True")
            def hook_each_test(request):
                yield
                sys.stdout.write("!stdout!")
                sys.stderr.write("!stderr!")
                logging.warning("!log!")
            def test_func():
                assert False
            """
        )
        result = pytester.runpytest("--show-capture=stdout", "--tb=short").stdout.str()
        assert "!stdout!" in result
        assert "!stderr!" not in result
        assert "!log!" not in result
        result = pytester.runpytest("--show-capture=stderr", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" in result
        assert "!log!" not in result
        result = pytester.runpytest("--show-capture=log", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" not in result
        assert "!log!" in result
        result = pytester.runpytest("--show-capture=no", "--tb=short").stdout.str()
        assert "!stdout!" not in result
        assert "!stderr!" not in result
        assert "!log!" not in result
@pytest.mark.xfail("not hasattr(os, 'dup')")
def test_fdopen_kept_alive_issue124(pytester: Pytester) -> None:
    """#124: a file object wrapping fd 1 may be kept alive across tests and
    closed later without breaking capture."""
    pytester.makepyfile(
        """
        import os, sys
        k = []
        def test_open_file_and_keep_alive(capfd):
            stdout = os.fdopen(1, 'w', 1)
            k.append(stdout)
        def test_close_kept_alive_file():
            stdout = k.pop()
            stdout.close()
        """
    )
    res = pytester.runpytest()
    res.stdout.fnmatch_lines(["*2 passed*"])
def test_tbstyle_native_setup_error(pytester: Pytester) -> None:
    """--tb=native renders fixture setup errors as plain Python tracebacks."""
    pytester.makepyfile(
        """
        import pytest
        @pytest.fixture
        def setup_error_fixture():
            raise Exception("error in exception")
        def test_error_fixture(setup_error_fixture):
            pass
        """
    )
    res = pytester.runpytest("--tb=native")
    res.stdout.fnmatch_lines(
        ['*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*']
    )
def test_terminal_summary(pytester: Pytester) -> None:
    """Sections and lines written by pytest_terminal_summary (including the
    exitstatus argument) appear in the terminal output."""
    pytester.makeconftest(
        """
        def pytest_terminal_summary(terminalreporter, exitstatus):
            w = terminalreporter
            w.section("hello")
            w.line("world")
            w.line("exitstatus: {0}".format(exitstatus))
        """
    )
    res = pytester.runpytest()
    # No tests collected -> exit status 5.
    res.stdout.fnmatch_lines(
        """
        *==== hello ====*
        world
        exitstatus: 5
    """
    )
@pytest.mark.filterwarnings("default::UserWarning")
def test_terminal_summary_warnings_are_displayed(pytester: Pytester) -> None:
    """Test that warnings emitted during pytest_terminal_summary are displayed.
    (#1305).
    """
    pytester.makeconftest(
        """
        import warnings
        def pytest_terminal_summary(terminalreporter):
            warnings.warn(UserWarning('internal warning'))
        """
    )
    pytester.makepyfile(
        """
        def test_failure():
            import warnings
            warnings.warn("warning_from_" + "test")
            assert 0
        """
    )
    result = pytester.runpytest("-ra")
    # The late warning gets its own "(final)" warnings section after the
    # short test summary.
    result.stdout.fnmatch_lines(
        [
            "*= warnings summary =*",
            "*warning_from_test*",
            "*= short test summary info =*",
            "*= warnings summary (final) =*",
            "*conftest.py:3:*internal warning",
            "*== 1 failed, 2 warnings in *",
        ]
    )
    result.stdout.no_fnmatch_line("*None*")
    stdout = result.stdout.str()
    assert stdout.count("warning_from_test") == 1
    assert stdout.count("=== warnings summary ") == 2
@pytest.mark.filterwarnings("default::UserWarning")
def test_terminal_summary_warnings_header_once(pytester: Pytester) -> None:
    """Without late warnings, the warnings summary header appears exactly once."""
    pytester.makepyfile(
        """
        def test_failure():
            import warnings
            warnings.warn("warning_from_" + "test")
            assert 0
        """
    )
    result = pytester.runpytest("-ra")
    result.stdout.fnmatch_lines(
        [
            "*= warnings summary =*",
            "*warning_from_test*",
            "*= short test summary info =*",
            "*== 1 failed, 1 warning in *",
        ]
    )
    result.stdout.no_fnmatch_line("*None*")
    stdout = result.stdout.str()
    assert stdout.count("warning_from_test") == 1
    assert stdout.count("=== warnings summary ") == 1
@pytest.mark.filterwarnings("default")
def test_terminal_no_summary_warnings_header_once(pytester: Pytester) -> None:
    """--no-summary suppresses both the warnings summary and the short summary."""
    pytester.makepyfile(
        """
        def test_failure():
            import warnings
            warnings.warn("warning_from_" + "test")
            assert 0
        """
    )
    res = pytester.runpytest("--no-summary")
    res.stdout.no_fnmatch_line("*= warnings summary =*")
    res.stdout.no_fnmatch_line("*= short test summary info =*")
@pytest.fixture(scope="session")
def tr() -> TerminalReporter:
    """Session-wide TerminalReporter built from a freshly prepared config."""
    # _prepareconfig is private API, acceptable inside pytest's own suite.
    cfg = _pytest.config._prepareconfig()
    return TerminalReporter(cfg)
@pytest.mark.parametrize(
"exp_color, exp_line, stats_arg",
[
("red", [("1 failed", {"bold": True, "red": True})], {"failed": [1]}),
(
"red",
[
("1 failed", {"bold": True, "red": True}),
("1 passed", {"bold": False, "green": True}),
],
{"failed": [1], "passed": [1]},
),
("red", [("1 error", {"bold": True, "red": True})], {"error": [1]}),
("red", [("2 errors", {"bold": True, "red": True})], {"error": [1, 2]}),
(
"red",
[
("1 passed", {"bold": False, "green": True}),
("1 error", {"bold": True, "red": True}),
],
{"error": [1], "passed": [1]},
),
("yellow", [("1 weird", {"bold": True, "yellow": True})], {"weird": [1]}),
(
"yellow",
[
("1 passed", {"bold": False, "green": True}),
("1 weird", {"bold": True, "yellow": True}),
],
{"weird": [1], "passed": [1]},
),
("yellow", [("1 warning", {"bold": True, "yellow": True})], {"warnings": [1]}),
(
"yellow",
[
("1 passed", {"bold": False, "green": True}),
("1 warning", {"bold": True, "yellow": True}),
],
{"warnings": [1], "passed": [1]},
),
(
"green",
[("5 passed", {"bold": True, "green": True})],
{"passed": [1, 2, 3, 4, 5]},
),
("yellow", [("1 skipped", {"bold": True, "yellow": True})], {"skipped": [1]}),
(
"green",
[
("1 passed", {"bold": True, "green": True}),
("1 skipped", {"bold": False, "yellow": True}),
],
{"skipped": [1], "passed": [1]},
),
(
"yellow",
[("1 deselected", {"bold": True, "yellow": True})],
{"deselected": [1]},
),
(
"green",
[
("1 passed", {"bold": True, "green": True}),
("1 deselected", {"bold": False, "yellow": True}),
],
{"deselected": [1], "passed": [1]},
),
("yellow", [("1 xfailed", {"bold": True, "yellow": True})], {"xfailed": [1]}),
(
"green",
[
("1 passed", {"bold": True, "green": True}),
("1 xfailed", {"bold": False, "yellow": True}),
],
{"xfailed": [1], "passed": [1]},
),
("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": [1]}),
(
"yellow",
[
("1 passed", {"bold": False, "green": True}),
("1 xpassed", {"bold": True, "yellow": True}),
],
{"xpassed": [1], "passed": [1]},
),
("yellow", [("no tests ran", {"yellow": True})], {}),
("yellow", [("no tests ran", {"yellow": True})], {"": [1]}),
(
"green",
[("1 passed", {"bold": True, "green": True})],
{"": [1], "passed": [1]},
),
(
"red",
[
("1 failed", {"bold": True, "red": True}),
("2 passed", {"bold": False, "green": True}),
("3 xfailed", {"bold": False, "yellow": True}),
],
{"passed": [1, 2], "failed": [1], "xfailed": [1, 2, 3]},
),
(
"green",
[
("1 passed", {"bold": True, "green": True}),
("2 skipped", {"bold": False, "yellow": True}),
("3 deselected", {"bold": False, "yellow": True}),
("2 xfailed", {"bold": False, "yellow": True}),
],
{
"passed": [1],
"skipped": [1, 2],
"deselected": [1, 2, 3],
"xfailed": [1, 2],
},
),
],
)
def test_summary_stats(
    tr: TerminalReporter,
    exp_line: List[Tuple[str, Dict[str, bool]]],
    exp_color: str,
    stats_arg: Dict[str, List[object]],
) -> None:
    """Check build_summary_stats_line() text and color for each parametrized stats dict."""
    tr.stats = stats_arg

    # Minimal fake session so the reporter believes all collected items were
    # reported (makes ``_is_last_item`` true).
    class fake_session:
        testscollected = 0

    tr._session = fake_session  # type: ignore[assignment]
    assert tr._is_last_item

    # Reset the cached main color so it is recomputed from the new stats.
    tr._main_color = None

    print("Based on stats: %s" % stats_arg)
    print(f'Expect summary: "{exp_line}"; with color "{exp_color}"')
    (line, color) = tr.build_summary_stats_line()
    print(f'Actually got: "{line}"; with color "{color}"')
    assert line == exp_line
    assert color == exp_color
def test_skip_counting_towards_summary(tr):
    """Reports with count_towards_summary=False are excluded from the stats line."""

    class DummyReport(BaseReport):
        count_towards_summary = True

    report_a, report_b = DummyReport(), DummyReport()

    def summary_for(*reports):
        # Reset the reporter state and rebuild the summary line.
        tr.stats = {"failed": reports}
        tr._main_color = None
        return tr.build_summary_stats_line()

    assert summary_for(report_a, report_b) == (
        [("2 failed", {"bold": True, "red": True})],
        "red",
    )

    # Opting one report out drops it from the count.
    report_a.count_towards_summary = False
    assert summary_for(report_a, report_b) == (
        [("1 failed", {"bold": True, "red": True})],
        "red",
    )
class TestClassicOutputStyle:
    """Ensure classic output style works as expected (#3883)"""

    @pytest.fixture
    def test_files(self, pytester: Pytester) -> None:
        """Create one passing file, one failing file, and a mixed sub-dir file."""
        pytester.makepyfile(
            **{
                "test_one.py": "def test_one(): pass",
                "test_two.py": "def test_two(): assert 0",
                "sub/test_three.py": """
                    def test_three_1(): pass
                    def test_three_2(): assert 0
                    def test_three_3(): pass
                """,
            }
        )

    def test_normal_verbosity(self, pytester: Pytester, test_files) -> None:
        """Default verbosity: one line per file with a '.'/'F' per test."""
        result = pytester.runpytest("-o", "console_output_style=classic")
        result.stdout.fnmatch_lines(
            [
                "test_one.py .",
                "test_two.py F",
                f"sub{os.sep}test_three.py .F.",
                "*2 failed, 3 passed in*",
            ]
        )

    def test_verbose(self, pytester: Pytester, test_files) -> None:
        """-v: one line per test with PASSED/FAILED words (no percentages)."""
        result = pytester.runpytest("-o", "console_output_style=classic", "-v")
        result.stdout.fnmatch_lines(
            [
                "test_one.py::test_one PASSED",
                "test_two.py::test_two FAILED",
                f"sub{os.sep}test_three.py::test_three_1 PASSED",
                f"sub{os.sep}test_three.py::test_three_2 FAILED",
                f"sub{os.sep}test_three.py::test_three_3 PASSED",
                "*2 failed, 3 passed in*",
            ]
        )

    def test_quiet(self, pytester: Pytester, test_files) -> None:
        """-q: just the progress characters, no file names."""
        result = pytester.runpytest("-o", "console_output_style=classic", "-q")
        result.stdout.fnmatch_lines([".F.F.", "*2 failed, 3 passed in*"])
class TestProgressOutputStyle:
    """Percentage / count progress display at the right edge of the output."""

    @pytest.fixture
    def many_tests_files(self, pytester: Pytester) -> None:
        """Create three files totaling 20 parametrized passing tests (10+5+5)."""
        pytester.makepyfile(
            test_bar="""
                import pytest
                @pytest.mark.parametrize('i', range(10))
                def test_bar(i): pass
            """,
            test_foo="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_foo(i): pass
            """,
            test_foobar="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_foobar(i): pass
            """,
        )

    def test_zero_tests_collected(self, pytester: Pytester) -> None:
        """Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being
        actually collected (#2971)."""
        pytester.makeconftest(
            """
            def pytest_collection_modifyitems(items, config):
                from _pytest.runner import CollectReport
                for node_id in ('nodeid1', 'nodeid2'):
                    rep = CollectReport(node_id, 'passed', None, None)
                    rep.when = 'passed'
                    rep.duration = 0.1
                    config.hook.pytest_runtest_logreport(report=rep)
            """
        )
        output = pytester.runpytest()
        # Progress computation must not divide by the (zero) collected count.
        output.stdout.no_fnmatch_line("*ZeroDivisionError*")
        output.stdout.fnmatch_lines(["=* 2 passed in *="])

    def test_normal(self, many_tests_files, pytester: Pytester) -> None:
        """Default style: cumulative percentage after each file's dots."""
        output = pytester.runpytest()
        output.stdout.re_match_lines(
            [
                r"test_bar.py \.{10} \s+ \[ 50%\]",
                r"test_foo.py \.{5} \s+ \[ 75%\]",
                r"test_foobar.py \.{5} \s+ \[100%\]",
            ]
        )

    def test_colored_progress(
        self, pytester: Pytester, monkeypatch, color_mapping
    ) -> None:
        """The percentage is colored by the worst outcome seen on that line."""
        monkeypatch.setenv("PY_COLORS", "1")
        pytester.makepyfile(
            test_axfail="""
                import pytest
                @pytest.mark.xfail
                def test_axfail(): assert 0
            """,
            test_bar="""
                import pytest
                @pytest.mark.parametrize('i', range(10))
                def test_bar(i): pass
            """,
            test_foo="""
                import pytest
                import warnings
                @pytest.mark.parametrize('i', range(5))
                def test_foo(i):
                    warnings.warn(DeprecationWarning("collection"))
                    pass
            """,
            test_foobar="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_foobar(i): raise ValueError()
            """,
        )
        result = pytester.runpytest()
        # xfail -> green, warnings -> yellow, failures -> red percentage.
        result.stdout.re_match_lines(
            color_mapping.format_for_rematch(
                [
                    r"test_axfail.py {yellow}x{reset}{green} \s+ \[ 4%\]{reset}",
                    r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 52%\]{reset}",
                    r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 76%\]{reset}",
                    r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}",
                ]
            )
        )

        # Only the xfail: summary and percentage stay yellow.
        result = pytester.runpytest("test_axfail.py")
        result.stdout.re_match_lines(
            color_mapping.format_for_rematch(
                [
                    r"test_axfail.py {yellow}x{reset}{yellow} \s+ \[100%\]{reset}",
                    r"^{yellow}=+ ({yellow}{bold}|{bold}{yellow})1 xfailed{reset}{yellow} in ",
                ]
            )
        )

    def test_count(self, many_tests_files, pytester: Pytester) -> None:
        """console_output_style=count shows 'done/total' instead of percent."""
        pytester.makeini(
            """
            [pytest]
            console_output_style = count
            """
        )
        output = pytester.runpytest()
        output.stdout.re_match_lines(
            [
                r"test_bar.py \.{10} \s+ \[10/20\]",
                r"test_foo.py \.{5} \s+ \[15/20\]",
                r"test_foobar.py \.{5} \s+ \[20/20\]",
            ]
        )

    def test_verbose(self, many_tests_files, pytester: Pytester) -> None:
        """-v: per-test lines still carry the cumulative percentage."""
        output = pytester.runpytest("-v")
        output.stdout.re_match_lines(
            [
                r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]",
                r"test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]",
                r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]",
            ]
        )

    def test_verbose_count(self, many_tests_files, pytester: Pytester) -> None:
        """-v with count style: per-test lines carry 'done/total'."""
        pytester.makeini(
            """
            [pytest]
            console_output_style = count
            """
        )
        output = pytester.runpytest("-v")
        output.stdout.re_match_lines(
            [
                r"test_bar.py::test_bar\[0\] PASSED \s+ \[ 1/20\]",
                r"test_foo.py::test_foo\[4\] PASSED \s+ \[15/20\]",
                r"test_foobar.py::test_foobar\[4\] PASSED \s+ \[20/20\]",
            ]
        )

    def test_xdist_normal(
        self, many_tests_files, pytester: Pytester, monkeypatch
    ) -> None:
        """With xdist, progress is shown for the merged dot stream."""
        pytest.importorskip("xdist")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
        output = pytester.runpytest("-n2")
        output.stdout.re_match_lines([r"\.{20} \s+ \[100%\]"])

    def test_xdist_normal_count(
        self, many_tests_files, pytester: Pytester, monkeypatch
    ) -> None:
        """xdist + count style: merged stream ends at 'total/total'."""
        pytest.importorskip("xdist")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
        pytester.makeini(
            """
            [pytest]
            console_output_style = count
            """
        )
        output = pytester.runpytest("-n2")
        output.stdout.re_match_lines([r"\.{20} \s+ \[20/20\]"])

    def test_xdist_verbose(
        self, many_tests_files, pytester: Pytester, monkeypatch
    ) -> None:
        """xdist -v: each report line shows the worker id and a percentage."""
        pytest.importorskip("xdist")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
        output = pytester.runpytest("-n2", "-v")
        output.stdout.re_match_lines_random(
            [
                r"\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]",
                r"\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]",
                r"\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]",
            ]
        )
        output.stdout.fnmatch_lines_random(
            [
                line.translate(TRANS_FNMATCH)
                for line in [
                    "test_bar.py::test_bar[0] ",
                    "test_foo.py::test_foo[0] ",
                    "test_foobar.py::test_foobar[0] ",
                    "[gw?] [ 5%] PASSED test_*[?] ",
                    "[gw?] [ 10%] PASSED test_*[?] ",
                    "[gw?] [ 55%] PASSED test_*[?] ",
                    "[gw?] [ 60%] PASSED test_*[?] ",
                    "[gw?] [ 95%] PASSED test_*[?] ",
                    "[gw?] [100%] PASSED test_*[?] ",
                ]
            ]
        )

    def test_capture_no(self, many_tests_files, pytester: Pytester) -> None:
        """-s / --capture=no disables the percentage display entirely."""
        output = pytester.runpytest("-s")
        output.stdout.re_match_lines(
            [r"test_bar.py \.{10}", r"test_foo.py \.{5}", r"test_foobar.py \.{5}"]
        )

        output = pytester.runpytest("--capture=no")
        output.stdout.no_fnmatch_line("*%]*")
class TestProgressWithTeardown:
    """Ensure we show the correct percentages for tests that fail during teardown (#3088)"""

    @pytest.fixture
    def contest_with_teardown_fixture(self, pytester: Pytester) -> None:
        """conftest providing a fixture that always fails during teardown."""
        pytester.makeconftest(
            """
            import pytest

            @pytest.fixture
            def fail_teardown():
                yield
                assert False
            """
        )

    @pytest.fixture
    def many_files(self, pytester: Pytester, contest_with_teardown_fixture) -> None:
        """Two files (5 + 15 tests), every test failing in teardown."""
        pytester.makepyfile(
            test_bar="""
                import pytest
                @pytest.mark.parametrize('i', range(5))
                def test_bar(fail_teardown, i):
                    pass
            """,
            test_foo="""
                import pytest
                @pytest.mark.parametrize('i', range(15))
                def test_foo(fail_teardown, i):
                    pass
            """,
        )

    def test_teardown_simple(
        self, pytester: Pytester, contest_with_teardown_fixture
    ) -> None:
        """A single test shows '.E' and still reaches 100%."""
        pytester.makepyfile(
            """
            def test_foo(fail_teardown):
                pass
            """
        )
        output = pytester.runpytest()
        output.stdout.re_match_lines([r"test_teardown_simple.py \.E\s+\[100%\]"])

    def test_teardown_with_test_also_failing(
        self, pytester: Pytester, contest_with_teardown_fixture
    ) -> None:
        """Both the test failure and the teardown error are summarized."""
        pytester.makepyfile(
            """
            def test_foo(fail_teardown):
                assert 0
            """
        )
        output = pytester.runpytest("-rfE")
        output.stdout.re_match_lines(
            [
                r"test_teardown_with_test_also_failing.py FE\s+\[100%\]",
                "FAILED test_teardown_with_test_also_failing.py::test_foo - assert 0",
                "ERROR test_teardown_with_test_also_failing.py::test_foo - assert False",
            ]
        )

    def test_teardown_many(self, pytester: Pytester, many_files) -> None:
        """Progress percentages count tests, not the extra teardown errors."""
        output = pytester.runpytest()
        output.stdout.re_match_lines(
            [r"test_bar.py (\.E){5}\s+\[ 25%\]", r"test_foo.py (\.E){15}\s+\[100%\]"]
        )

    def test_teardown_many_verbose(
        self, pytester: Pytester, many_files, color_mapping
    ) -> None:
        """-v: PASSED and ERROR lines for the same test show the same percentage."""
        result = pytester.runpytest("-v")
        result.stdout.fnmatch_lines(
            color_mapping.format_for_fnmatch(
                [
                    "test_bar.py::test_bar[0] PASSED * [ 5%]",
                    "test_bar.py::test_bar[0] ERROR * [ 5%]",
                    "test_bar.py::test_bar[4] PASSED * [ 25%]",
                    "test_foo.py::test_foo[14] PASSED * [100%]",
                    "test_foo.py::test_foo[14] ERROR * [100%]",
                    "=* 20 passed, 20 errors in *",
                ]
            )
        )

    def test_xdist_normal(self, many_files, pytester: Pytester, monkeypatch) -> None:
        """xdist: merged '.'/'E' stream still ends at 100%."""
        pytest.importorskip("xdist")
        monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
        output = pytester.runpytest("-n2")
        output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"])
def test_skip_reasons_folding() -> None:
    """Skips sharing the same (path, lineno, message) fold into a single entry."""
    skip_path = "xyz"
    skip_lineno = 3
    skip_message = "justso"
    shared_longrepr = (skip_path, skip_lineno, skip_message)

    class FakeReport:
        pass

    def make_skip(when: str) -> CollectReport:
        report = cast(CollectReport, FakeReport())
        report.when = when
        report.skipped = True
        report.longrepr = shared_longrepr
        return report

    reports = [make_skip("execute"), make_skip("execute"), make_skip("collect")]
    folded = _folded_skips(Path.cwd(), reports)

    # All three reports collapse into one row carrying their count.
    assert len(folded) == 1
    count, fspath, lineno, reason = folded[0]
    assert count == 3
    assert fspath == skip_path
    assert lineno == skip_lineno
    assert reason == skip_message
def test_line_with_reprcrash(monkeypatch: MonkeyPatch) -> None:
    """Short-summary lines are truncated to the given width, wide-char aware."""
    mocked_verbose_word = "FAILED"

    mocked_pos = "some::nodeid"

    def mock_get_pos(*args):
        # Reads the enclosing ``mocked_pos`` binding, so rebinding it below
        # changes what subsequent checks see.
        return mocked_pos

    monkeypatch.setattr(_pytest.terminal, "_get_pos", mock_get_pos)

    class config:
        pass

    class rep:
        def _get_verbose_word(self, *args):
            return mocked_verbose_word

        class longrepr:
            class reprcrash:
                pass

    def check(msg, width, expected):
        __tracebackhide__ = True
        if msg:
            rep.longrepr.reprcrash.message = msg
        actual = _get_line_with_reprcrash_message(config, rep(), width)

        assert actual == expected
        if actual != f"{mocked_verbose_word} {mocked_pos}":
            # Truncated output must fit both in characters and display cells.
            assert len(actual) <= width
            assert wcswidth(actual) <= width

    # No message: just "<word> <pos>".
    check(None, 80, "FAILED some::nodeid")
    # Short message appended when it fits the width, dropped otherwise.
    check("msg", 80, "FAILED some::nodeid - msg")
    check("msg", 3, "FAILED some::nodeid")
    check("msg", 24, "FAILED some::nodeid")
    check("msg", 25, "FAILED some::nodeid - msg")
    check("some longer msg", 24, "FAILED some::nodeid")
    check("some longer msg", 25, "FAILED some::nodeid - ...")
    check("some longer msg", 26, "FAILED some::nodeid - s...")
    # Only the first line of a multi-line message is used.
    check("some\nmessage", 25, "FAILED some::nodeid - ...")
    check("some\nmessage", 26, "FAILED some::nodeid - some")
    check("some\nmessage", 80, "FAILED some::nodeid - some")
    # '🉐' occupies two terminal cells: truncation counts display width.
    check("🉐🉐🉐🉐🉐\n2nd line", 25, "FAILED some::nodeid - ...")
    check("🉐🉐🉐🉐🉐\n2nd line", 26, "FAILED some::nodeid - ...")
    check("🉐🉐🉐🉐🉐\n2nd line", 27, "FAILED some::nodeid - 🉐...")
    check("🉐🉐🉐🉐🉐\n2nd line", 28, "FAILED some::nodeid - 🉐...")
    check("🉐🉐🉐🉐🉐\n2nd line", 29, "FAILED some::nodeid - 🉐🉐...")

    # Wide characters in the node id itself are handled the same way.
    mocked_pos = "nodeid::🉐::withunicode"
    check("🉐🉐🉐🉐🉐\n2nd line", 29, "FAILED nodeid::🉐::withunicode")
    check("🉐🉐🉐🉐🉐\n2nd line", 40, "FAILED nodeid::🉐::withunicode - 🉐🉐...")
    check("🉐🉐🉐🉐🉐\n2nd line", 41, "FAILED nodeid::🉐::withunicode - 🉐🉐...")
    check("🉐🉐🉐🉐🉐\n2nd line", 42, "FAILED nodeid::🉐::withunicode - 🉐🉐🉐...")
    check("🉐🉐🉐🉐🉐\n2nd line", 80, "FAILED nodeid::🉐::withunicode - 🉐🉐🉐🉐🉐")
@pytest.mark.parametrize(
    "seconds, expected",
    [
        (10.0, "10.00s"),
        (10.34, "10.34s"),
        (59.99, "59.99s"),
        (60.55, "60.55s (0:01:00)"),
        (123.55, "123.55s (0:02:03)"),
        (60 * 60 + 0.5, "3600.50s (1:00:00)"),
    ],
)
def test_format_session_duration(seconds, expected):
    """Durations of a minute or more also get an H:MM:SS rendering appended."""
    from _pytest.terminal import format_session_duration

    assert format_session_duration(seconds) == expected
def test_collecterror(pytester: Pytester) -> None:
    """A collection error is reported, summarized, and interrupts the session."""
    bad_module = pytester.makepyfile("raise SyntaxError()")
    result = pytester.runpytest("-ra", str(bad_module))
    expected_lines = [
        "collected 0 items / 1 error",
        "*= ERRORS =*",
        "*_ ERROR collecting test_collecterror.py _*",
        "E SyntaxError: *",
        "*= short test summary info =*",
        "ERROR test_collecterror.py",
        "*! Interrupted: 1 error during collection !*",
        "*= 1 error in *",
    ]
    result.stdout.fnmatch_lines(expected_lines)
def test_no_summary_collecterror(pytester: Pytester) -> None:
    """--no-summary suppresses the ERRORS section for collection failures."""
    bad_module = pytester.makepyfile("raise SyntaxError()")
    run = pytester.runpytest("-ra", "--no-summary", str(bad_module))
    run.stdout.no_fnmatch_line("*= ERRORS =*")
def test_via_exec(pytester: Pytester) -> None:
    """Tests defined through exec() are reported with a '<- <string>' location."""
    source = pytester.makepyfile("exec('def test_via_exec(): pass')")
    run = pytester.runpytest(str(source), "-vv")
    run.stdout.fnmatch_lines(
        ["test_via_exec.py::test_via_exec <- <string> PASSED*", "*= 1 passed in *"]
    )
class TestCodeHighlight:
    """Syntax highlighting of traceback source snippets with --color=yes."""

    def test_code_highlight_simple(self, pytester: Pytester, color_mapping) -> None:
        """Keywords and numbers in the failing source get highlight markup."""
        pytester.makepyfile(
            """
            def test_foo():
                assert 1 == 10
            """
        )
        result = pytester.runpytest("--color=yes")
        result.stdout.fnmatch_lines(
            color_mapping.format_for_fnmatch(
                [
                    " {kw}def{hl-reset} {function}test_foo{hl-reset}():",
                    "> {kw}assert{hl-reset} {number}1{hl-reset} == {number}10{hl-reset}",
                    "{bold}{red}E assert 1 == 10{reset}",
                ]
            )
        )

    def test_code_highlight_continuation(
        self, pytester: Pytester, color_mapping
    ) -> None:
        """Highlighting stays correct across a multi-line string continuation."""
        pytester.makepyfile(
            """
            def test_foo():
                print('''
                '''); assert 0
            """
        )
        result = pytester.runpytest("--color=yes")
        result.stdout.fnmatch_lines(
            color_mapping.format_for_fnmatch(
                [
                    " {kw}def{hl-reset} {function}test_foo{hl-reset}():",
                    " {print}print{hl-reset}({str}'''{hl-reset}{str}{hl-reset}",
                    "> {str} {hl-reset}{str}'''{hl-reset}); {kw}assert{hl-reset} {number}0{hl-reset}",
                    "{bold}{red}E assert 0{reset}",
                ]
            )
        )

    def test_code_highlight_custom_theme(
        self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
    ) -> None:
        """PYTEST_THEME/PYTEST_THEME_MODE select an alternate pygments style."""
        pytester.makepyfile(
            """
            def test_foo():
                assert 1 == 10
            """
        )
        monkeypatch.setenv("PYTEST_THEME", "solarized-dark")
        monkeypatch.setenv("PYTEST_THEME_MODE", "dark")
        result = pytester.runpytest("--color=yes")
        result.stdout.fnmatch_lines(
            color_mapping.format_for_fnmatch(
                [
                    " {kw}def{hl-reset} {function}test_foo{hl-reset}():",
                    "> {kw}assert{hl-reset} {number}1{hl-reset} == {number}10{hl-reset}",
                    "{bold}{red}E assert 1 == 10{reset}",
                ]
            )
        )

    def test_code_highlight_invalid_theme(
        self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
    ) -> None:
        """An unknown PYTEST_THEME aborts with a clear error on stderr."""
        pytester.makepyfile(
            """
            def test_foo():
                assert 1 == 10
            """
        )
        monkeypatch.setenv("PYTEST_THEME", "invalid")
        # Subprocess run: the theme is resolved during startup.
        result = pytester.runpytest_subprocess("--color=yes")
        result.stderr.fnmatch_lines(
            "ERROR: PYTEST_THEME environment variable had an invalid value: 'invalid'. "
            "Only valid pygment styles are allowed."
        )

    def test_code_highlight_invalid_theme_mode(
        self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
    ) -> None:
        """An unknown PYTEST_THEME_MODE aborts with a clear error on stderr."""
        pytester.makepyfile(
            """
            def test_foo():
                assert 1 == 10
            """
        )
        monkeypatch.setenv("PYTEST_THEME_MODE", "invalid")
        result = pytester.runpytest_subprocess("--color=yes")
        result.stderr.fnmatch_lines(
            "ERROR: PYTEST_THEME_MODE environment variable had an invalid value: 'invalid'. "
            "The only allowed values are 'dark' and 'light'."
        )
def test_raw_skip_reason_skipped() -> None:
    """The 'Skipped: ' prefix is stripped from a skip report's longrepr message."""
    fake_report = SimpleNamespace(
        skipped=True,
        longrepr=("xyz", 3, "Skipped: Just so"),
    )
    assert _get_raw_skip_reason(cast(TestReport, fake_report)) == "Just so"
def test_raw_skip_reason_xfail() -> None:
    """The 'reason: ' prefix is stripped from an xfail report's reason."""
    fake_report = SimpleNamespace(wasxfail="reason: To everything there is a season")
    extracted = _get_raw_skip_reason(cast(TestReport, fake_report))
    assert extracted == "To everything there is a season"
def test_format_trimmed() -> None:
    """_format_trimmed keeps the message whole when it fits, else trims with '...'."""
    msg = "unconditional skip"
    template = " ({}) "
    # Template decoration adds 4 characters; an exact fit is kept whole.
    assert _format_trimmed(template, msg, len(msg) + 4) == " (unconditional skip) "
    # One character short: the message is trimmed and an ellipsis appended.
    assert _format_trimmed(template, msg, len(msg) + 3) == " (unconditional ...) "
| [
"mcastelluccio@mozilla.com"
] | mcastelluccio@mozilla.com |
388f86bda5afb381925eaa8c9ed9cfb68cbc1a51 | fe63dd13e412b2d988da6163fea2dc16cb98b5b5 | /uconf/converter.py | 51ad25b1cc0615920b347f772870ef1384e0cb3a | [
"BSD-2-Clause"
] | permissive | rbarrois/uconf | 223b22874d47175f85eaea16f6a05778454dd425 | 41e8d2384ff3bf900c372e371ff8b40990ff4322 | refs/heads/master | 2023-06-12T14:14:28.894990 | 2020-07-17T09:30:03 | 2020-07-17T09:30:03 | 15,620,174 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,978 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2013 Raphaël Barrois
# This software is distributed under the two-clause BSD license.
import difflib
import re
from uconf import rule_parser
class CommandError(Exception):
    """Raised when a '#@<command>' directive is unknown or malformed."""
    pass
class FileProcessor:
    """Handles 'standard' processing of a file.

    Attributes:
        src (str list): lines of the file to process
        fs (FileSystem): abstraction toward the filesystem
    """

    def __init__(self, src, fs):
        self.src = list(src)
        self.fs = fs

    def _get_gen_config(self, categories):
        # Build a GeneratorConfig with the default command set, bound to the
        # given active categories.
        return GeneratorConfig(
            categories=categories,
            commands=[cmd() for cmd in DEFAULT_COMMANDS],
            fs=self.fs,
        )

    def forward(self, categories):
        """Process the source file with an active list of categories.

        Yields:
            str: generated output lines; masked lines (commands, comments,
            unpublished blocks) are skipped.
        """
        gen_config = self._get_gen_config(categories)
        generator = gen_config.load(self.src)
        for line in generator:
            if line.output is not None:
                yield line.output

    def backward(self, categories, modified):
        """Revert a file.

        Regenerates the expected output, diffs it against the user-modified
        version, and backports the diff onto the annotated source.

        Args:
            categories (str iterable): active categories
            modified (str list): lines of the modified file

        Yields:
            str: updated lines for the original file
        """
        categories = frozenset(categories)
        original_output = self.forward(categories)
        diff = Differ(original_output, modified)
        gen_config = self._get_gen_config(categories)
        generator = gen_config.load(self.src)
        backporter = Backporter(diff, generator)
        for line in backporter:
            yield line
class Differ:
    """Computes differences between two files (as string lists).

    Based on difflib.SequenceMatcher, but yields atomic, line-level
    operations instead of range opcodes.

    Attributes:
        original (str list): lines of the original file
        modified (str list): lines of the modified file
    """

    def __init__(self, original, modified):
        self.original = list(original)
        self.modified = list(modified)

    def __iter__(self):
        """Yield atomic diff lines.

        Yields:
            (operation, line) tuples, where operation is one of
            'equal', 'insert', 'delete', 'replace'.
        """
        matcher = difflib.SequenceMatcher(a=self.original, b=self.modified)
        for tag, orig_lo, orig_hi, mod_lo, mod_hi in matcher.get_opcodes():
            if tag == 'equal':
                for text in self.original[orig_lo:orig_hi]:
                    yield ('equal', text)
            elif tag == 'insert':
                for text in self.modified[mod_lo:mod_hi]:
                    yield ('insert', text)
            elif tag == 'delete':
                for text in self.original[orig_lo:orig_hi]:
                    yield ('delete', text)
            else:  # 'replace'
                # Pair up as many lines as both sides share, then emit the
                # leftovers as plain inserts/deletes.
                paired = min(orig_hi - orig_lo, mod_hi - mod_lo)
                for text in self.modified[mod_lo:mod_lo + paired]:
                    yield ('replace', text)
                for text in self.modified[mod_lo + paired:mod_hi]:
                    yield ('insert', text)
                for text in self.original[orig_lo + paired:orig_hi]:
                    yield ('delete', text)
class Backporter:
    """Handles the backporting of a diff to an original file.

    Attributes:
        diff ((operation, new_line) iterable): the lines of the diff
        source (Line iterable): the lines of the source
    """

    def __init__(self, diff, source):
        self.diff = diff
        self.source = source

    def reverse(self, output):
        """Convert back an output line into its original version."""
        # fill_original() re-escapes lines that would otherwise be parsed as
        # command/comment markers on the next forward pass.
        line = Line(output, None)
        line.fill_original()
        return line.original

    def __iter__(self):
        """Yield lines from the initial file.

        Walks source and diff in lockstep: the diff describes changes to the
        *generated* output, so it only advances when the source line produced
        output (line.output is not None).
        """
        diff = iter(self.diff)
        for line in self.source:
            # Loop through the generated lines
            if line.output is None:
                # Masked line (comment, command)
                # Always include
                yield line.original
            else:
                # NOTE(review): next(diff) assumes the diff covers every
                # generated line; a shorter diff would raise StopIteration.
                action, output = next(diff)
                while action == 'insert':
                    # Inserting lines
                    # Always include, without forwarding the source
                    yield self.reverse(output)
                    action, output = next(diff)
                if action == 'delete':
                    # Deleting one line, advance the source
                    continue
                elif action == 'equal':
                    # No change
                    yield line.original
                else:
                    assert action == "replace"
                    # Backport the resulting line
                    yield self.reverse(output)

        # Handle additional lines from the diff
        # Should only be 'insert' lines.
        for action, output in diff:
            assert action == 'insert', "Unexpected action %s on %r" % (action, output)
            yield self.reverse(output)
class Line:
    """One processed line: rendered output plus the raw source text.

    Attributes:
        output (str or None): the rendered line; None when the source line is
            masked (a command/comment line that produces no output).
        original (str or None): the raw line from the source file.
    """

    # Output lines starting with '"@', '!@' or '#@' would be re-parsed as
    # command markers, so they must be escaped when written back.
    # Hoisted to the class so the regex is compiled once, not per call.
    _ESCAPE_RE = re.compile(r'^["!#]@')

    def __init__(self, output, original):
        self.output = output
        self.original = original

    def __repr__(self):
        return "Line(%r, %r)" % (self.output, self.original)

    def __hash__(self):
        return hash((self.output, self.original))

    def __eq__(self, other):
        if not isinstance(other, Line):
            return NotImplemented
        return self.output == other.output and self.original == other.original

    def fill_original(self):
        """Fill the 'original' part from the output.

        If the output line looks like a command or comment marker, escape it
        by doubling the '@' so it round-trips through the generator.
        """
        if self.original is not None:
            return

        if self._ESCAPE_RE.match(self.output):
            self.original = '%s@%s' % (
                self.output[:2],
                self.output[2:],
            )
        else:
            self.original = self.output
class Block:
    """A block opened in the source file by a '#@if' or '#@with' command."""

    KIND_IF = 'if'
    KIND_WITH = 'with'

    def __init__(self, kind, start_line, published=True, context=None):
        # kind: one of KIND_IF / KIND_WITH.
        self.kind = kind
        # Whether lines inside this block appear in the output.
        self.published = published
        # Placeholder variables bound by this block (empty for 'if').
        self.context = context or {}
        # Source line number where the block was opened (for error messages).
        self.start_line = start_line

    def __repr__(self):
        description = (self.kind, self.start_line, self.published, self.context)
        return "Block(%r, %d, %r, %r)" % description
class BlockStack:
    """Stack of currently-open Block objects, innermost last."""

    def __init__(self):
        self.blocks = []

    def __bool__(self):
        # Bug fix: this was named ``__nonzero__``, the *Python 2* truth
        # protocol name, which Python 3 never calls (truthiness silently
        # fell back to __len__).  ``__bool__`` is the Python 3 name.
        return bool(self.blocks)

    def __len__(self):
        return len(self.blocks)

    def __repr__(self):
        return "<BlockStack: %r>" % self.blocks

    @property
    def published(self):
        """Whether every open block publishes its content (empty stack: True)."""
        return all(b.published for b in self.blocks)

    @property
    def merged_context(self):
        """Placeholder contexts merged from outermost to innermost block."""
        context = {}
        for block in self.blocks:
            context.update(block.context)
        return context

    def enter(self, *args, **kwargs):
        """Create a new Block from the arguments, push it, and return it."""
        block = Block(*args, **kwargs)
        self.blocks.append(block)
        return block

    def leave(self, kind):
        """Pop and return the innermost block, checking its kind.

        Raises:
            ValueError: if the stack is empty or the top block's kind differs.
        """
        if not self.blocks:
            raise ValueError("Not inside a block.")
        last_kind = self.blocks[-1].kind
        if last_kind != kind:
            raise ValueError("Unexpected last block kind: %s!=%s." % (last_kind, kind))
        return self.blocks.pop()
class BaseCommand:
    """A command.

    Handles one or more '#@<key>' directives found in a source file.

    Entry points: get_keys(), handle(...).
    """

    # Keys (command names) this command responds to; overridden by subclasses.
    keys = ()

    def get_keys(self):
        """Return the list of "keys" (or "commands") handled by this class."""
        return self.keys

    def handle(self, key, argline, state, config):
        """Handle a line.

        Args:
            key (str): one of the keys in get_keys()
            argline (str): everything after the key and a space
            state (GeneratorState): the current state of the generator
            config (GeneratorConfig): various config-time params of the generator
        """
        raise NotImplementedError()
class BaseBlockCommand(BaseCommand):
    """Command handling a block: enter key(s), optional inside key(s), exit key(s)."""

    enter_keys = ()
    inside_keys = ()
    exit_keys = ()

    def get_keys(self):
        """All keys this command reacts to, in enter/inside/exit order."""
        return self.enter_keys + self.inside_keys + self.exit_keys

    def handle(self, key, argline, state, config):
        """Dispatch to enter()/inside()/exit() according to the key group."""
        if key in self.enter_keys:
            handler = self.enter
        elif key in self.inside_keys:
            handler = self.inside
        else:
            assert key in self.exit_keys
            handler = self.exit
        return handler(key, argline, state, config)

    def enter(self, key, argline, state, config):
        raise NotImplementedError()

    def inside(self, key, argline, state, config):
        raise NotImplementedError()

    def exit(self, key, argline, state, config):
        raise NotImplementedError()
class IfBlockCommand(BaseBlockCommand):
    """'#@if / #@elif / #@else / #@endif' conditional blocks.

    A branch is published when its rule matches the active categories AND no
    earlier branch of the same chain was already published.
    """

    enter_keys = ('if',)
    inside_keys = ('else', 'elif')
    exit_keys = ('endif',)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Parses the rule expression following '#@if' / '#@elif'.
        self.rule_lexer = rule_parser.RuleLexer()

    def _switch_branch(self, state, published, branch_taken):
        """Open a new branch block, remembering whether any branch matched yet."""
        block = state.enter_block(Block.KIND_IF, published=published)
        block.branch_taken = branch_taken
        return block

    def enter(self, key, argline, state, config):
        """'#@if <rule>': open a chain; publish iff <rule> matches."""
        rule = self.rule_lexer.get_rule(argline)
        published = rule.test(config.categories)
        self._switch_branch(state, published, branch_taken=published)

    def inside(self, key, argline, state, config):
        """'#@elif <rule>' / '#@else': start the next branch of the chain."""
        if key == 'else':
            if argline:
                state.error("Command 'else' takes no argument, got %r", argline)
            last_block = state.leave_block(Block.KIND_IF)
            taken = getattr(last_block, 'branch_taken', last_block.published)
            # Bug fix: only the *immediately preceding* branch used to be
            # consulted, so 'if true / elif false / else' published both the
            # 'if' and the 'else' content.  'branch_taken' tracks whether any
            # branch of the chain matched so far.
            self._switch_branch(state, published=not taken, branch_taken=True)
        else:
            assert key == 'elif'
            last_block = state.leave_block(Block.KIND_IF)
            taken = getattr(last_block, 'branch_taken', last_block.published)
            if taken:
                published = False
            else:
                rule = self.rule_lexer.get_rule(argline)
                published = rule.test(config.categories)
            self._switch_branch(state, published, branch_taken=taken or published)

    def exit(self, key, argline, state, config):
        """'#@endif': close the chain."""
        assert key == 'endif'
        if argline:
            state.error("Command 'endif' takes no argument, got %r", argline)
        state.leave_block(Block.KIND_IF)
class WithBlockCommand(BaseBlockCommand):
    """'#@with var=value' / '#@withfile var=file' ... '#@endwith' blocks.

    Binds one placeholder variable for the duration of the block; for
    'withfile' the value is read from the named file.
    """

    enter_keys = ('with', 'withfile')
    exit_keys = ('endwith',)

    # Arguments look like 'name=value' (name is a single \w+ word).
    with_args_re = re.compile(r'^(\w+)=(.*)$')

    def _read_file(self, filename, config):
        """Read one line from a file."""
        return config.fs.read_one_line(filename)

    def _parse_with_args(self, args, state):
        """Parse "#@with" arguments (and validate the line structure)."""
        match = self.with_args_re.match(args)
        if not match:
            state.error("Invalid 'with' argument %r", args)
        return match.groups()

    def enter(self, key, argline, state, config):
        """Open a with-block binding one placeholder variable."""
        if key == 'with':
            var, value = self._parse_with_args(argline, state=state)
            state.enter_block(Block.KIND_WITH, context={var: value})
        else:
            assert key == 'withfile'
            var, filename = self._parse_with_args(argline, state=state)
            value = self._read_file(filename, config)
            state.enter_block(Block.KIND_WITH, context={var: value})

    def exit(self, key, argline, state, config):
        """'#@endwith [var]': close the block; the optional var must match."""
        last_block = state.leave_block(Block.KIND_WITH)
        if argline and argline not in last_block.context:
            raise CommandError(
                "Block mismatch: closing 'with' block from line %d with invalid variable %r"
                % (last_block.start_line, argline),
            )
class GeneratorState:
    """Handles the internal generator state.

    Attributes:
        in_published_block (bool): whether the current block should be published
        context (str => str dict): maps a placeholder name to its content
        _current_lineno (int): the current line number
    """

    def __init__(self):
        self.block_stack = BlockStack()
        self._current_lineno = 0

    @property
    def in_published_block(self):
        return self.block_stack.published

    @property
    def context(self):
        return self.block_stack.merged_context

    def error(self, message, *args):
        """Raise a ValueError tagged with the current line number."""
        err_msg = "Error on line %d: " % self._current_lineno
        raise ValueError((err_msg + message) % args)

    def advance_to(self, lineno):
        """Record the line number currently being processed (for errors)."""
        self._current_lineno = lineno

    def enter_block(self, kind, published=True, context=None):
        """Open a new block at the current line; returns the Block."""
        return self.block_stack.enter(
            kind=kind,
            published=published,
            context=context,
            start_line=self._current_lineno,
        )

    def leave_block(self, kind):
        """Close the innermost block, re-raising mismatches as line-tagged errors."""
        try:
            return self.block_stack.leave(kind)
        except ValueError as e:
            # Bug fix: this used to call the undefined method ``self.invalid``,
            # which raised AttributeError instead of the intended line-tagged
            # ValueError from ``self.error``.
            self.error("Error when closing block: %r", e)
# Command classes enabled by default for every generated file:
# '#@if/#@elif/#@else/#@endif' conditionals and
# '#@with/#@withfile/#@endwith' placeholder blocks.
DEFAULT_COMMANDS = [
    IfBlockCommand,
    WithBlockCommand,
]
class Generator:
    """Generate the output from a source.

    Attributes:
        src (iterable of str): the source lines
        state (GeneratorState): the current generator state
    """

    # A command/comment line starts with '"@', '!@' or '#@'.
    command_prefix_re = re.compile(r'^(["!#]@)(.+)$')

    def __init__(self, src, commands, config):
        self.src = src
        self.config = config
        self.state = GeneratorState()
        # Map each command key (e.g. 'if', 'endwith') to its handler,
        # rejecting duplicate registrations.
        self.commands_by_key = {}
        for command in commands:
            for key in command.get_keys():
                if key in self.commands_by_key:
                    raise ValueError(
                        "Duplicate command for key %s: got %r and %r"
                        % (key, command, self.commands_by_key[key]),
                    )
                self.commands_by_key[key] = command

    def __iter__(self):
        """Yield a Line(output, original) for every source line.

        ``output`` is None for masked lines: commands, comments, and lines
        inside unpublished blocks.
        """
        for lineno, line in enumerate(self.src):
            self.state.advance_to(lineno)
            match = self.command_prefix_re.match(line)
            if match:
                prefix, command = match.groups()
                output = self.handle_line(prefix, command)
            # If displaying the line, replace placeholders.
            elif self.state.in_published_block:
                updated_line = line
                # Substitute each '@@var@@' placeholder from the with-context.
                for var, value in self.state.context.items():
                    pattern = '@@%s@@' % var
                    updated_line = updated_line.replace(pattern, value)
                output = updated_line
            # Not displaying the line
            else:
                output = None
            yield Line(output, line)

    def handle_line(self, prefix, command):
        """Handle a command-prefixed line; return its output text (or None)."""
        if command.startswith('#'):
            # A comment
            return None
        elif command.startswith('@'):
            # An escaped line
            return prefix + command[1:]
        else:
            if ' ' in command:
                name, args = command.split(' ', 1)
            else:
                name, args = command, ''
            self.handle_command(name, args)
            return None

    def handle_command(self, command, args):
        """Handle a "#@<command>" line."""
        if command not in self.commands_by_key:
            raise CommandError("Unknown command '%s' (not in %r)" % (command, sorted(self.commands_by_key)))
        handler = self.commands_by_key[command]
        handler.handle(command, args, self.state, self.config)
class GeneratorConfig:
    """Bundles the settings needed to build a Generator over a source file."""

    def __init__(self, categories, commands, fs, generator=Generator):
        self.categories = categories
        self.commands = commands
        self.fs = fs
        self.fs_root = '/'
        self.generator_class = generator

    def load(self, source_file):
        """Instantiate the configured generator class over *source_file*."""
        generator_cls = self.generator_class
        return generator_cls(source_file, config=self, commands=self.commands)
| [
"raphael.barrois@polytechnique.org"
] | raphael.barrois@polytechnique.org |
c49da5ff1522e716b2caa15068e8c33c06ee613b | 119b6105b4efead8b88d8881d916b0a8b3d28237 | /ThriftAPI/gen_py_tmp/Deploy/ttypes.py | 33db186159e7839b849744a1a84fdd1d0c59f9fd | [] | no_license | GWenPeng/Webui_framework | 04ce4eafc9d1126ff9148347d80e926d295c7457 | a487fdc7f1e4c1c738ec9bc71cef7209da9be7f2 | refs/heads/master | 2022-11-30T00:27:05.513490 | 2020-08-06T01:49:58 | 2020-08-06T01:49:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 37,673 | py | #
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import EThriftException.ttypes
import ECMSManager.ttypes
from thrift.transport import TTransport
all_structs = []
class HaSys(object):
    """HA subsystem kinds (Thrift-generated int enum).

    Generated by the Thrift compiler -- regenerate instead of hand-editing.
    """
    NORMAL = 0
    BASIC = 1
    APP = 2
    STORAGE = 3
    DB = 4

    # int value -> symbolic name (used when printing/serializing).
    _VALUES_TO_NAMES = {
        0: "NORMAL",
        1: "BASIC",
        2: "APP",
        3: "STORAGE",
        4: "DB",
    }

    # symbolic name -> int value (inverse mapping).
    _NAMES_TO_VALUES = {
        "NORMAL": 0,
        "BASIC": 1,
        "APP": 2,
        "STORAGE": 3,
        "DB": 4,
    }
class ncTLVSSys(object):
    """LVS subsystem kinds (Thrift-generated int enum).

    Generated by the Thrift compiler -- regenerate instead of hand-editing.
    """
    APP = 1
    STORAGE = 2

    # int value -> symbolic name.
    _VALUES_TO_NAMES = {
        1: "APP",
        2: "STORAGE",
    }

    # symbolic name -> int value.
    _NAMES_TO_VALUES = {
        "APP": 1,
        "STORAGE": 2,
    }
class ncTDeployManagerError(object):
    """Deploy-manager error codes, range 50001-50009 (Thrift-generated enum).

    Generated by the Thrift compiler -- regenerate instead of hand-editing.
    """
    NCT_NOT_APPLICATION_NODE = 50001
    NCT_SERVICE_PACKAGE_MISSING = 50002
    NCT_SERVICE_ALREADY_INSTALLED = 50003
    NCT_SERVICE_NOT_INSTALL = 50004
    NCT_SERVICE_VERSION_LOWER = 50005
    NCT_SERVICE_PACKAGE_DAMAGE = 50006
    NCT_NODE_IS_OFFLINE = 50007
    NCT_NODE_IS_MASTER = 50008
    NCT_NODE_TYPE_IS_INVALID = 50009

    # int value -> symbolic name.
    _VALUES_TO_NAMES = {
        50001: "NCT_NOT_APPLICATION_NODE",
        50002: "NCT_SERVICE_PACKAGE_MISSING",
        50003: "NCT_SERVICE_ALREADY_INSTALLED",
        50004: "NCT_SERVICE_NOT_INSTALL",
        50005: "NCT_SERVICE_VERSION_LOWER",
        50006: "NCT_SERVICE_PACKAGE_DAMAGE",
        50007: "NCT_NODE_IS_OFFLINE",
        50008: "NCT_NODE_IS_MASTER",
        50009: "NCT_NODE_TYPE_IS_INVALID",
    }

    # symbolic name -> int value.
    _NAMES_TO_VALUES = {
        "NCT_NOT_APPLICATION_NODE": 50001,
        "NCT_SERVICE_PACKAGE_MISSING": 50002,
        "NCT_SERVICE_ALREADY_INSTALLED": 50003,
        "NCT_SERVICE_NOT_INSTALL": 50004,
        "NCT_SERVICE_VERSION_LOWER": 50005,
        "NCT_SERVICE_PACKAGE_DAMAGE": 50006,
        "NCT_NODE_IS_OFFLINE": 50007,
        "NCT_NODE_IS_MASTER": 50008,
        "NCT_NODE_TYPE_IS_INVALID": 50009,
    }
class ncTVersionCheck(object):
    """Thrift enum: result of comparing a package version to the installed service version."""
    NCT_PACKAGE_VERSION_EQ_SERVICE_VERSION = 1
    NCT_PACKAGE_VERSION_HIGER_THAN_SERVICE_VERSION = 2

    _VALUES_TO_NAMES = {
        1: "NCT_PACKAGE_VERSION_EQ_SERVICE_VERSION",
        2: "NCT_PACKAGE_VERSION_HIGER_THAN_SERVICE_VERSION",
    }

    _NAMES_TO_VALUES = {
        "NCT_PACKAGE_VERSION_EQ_SERVICE_VERSION": 1,
        "NCT_PACKAGE_VERSION_HIGER_THAN_SERVICE_VERSION": 2,
    }
class ncTServiceInfos(object):
    """
    Thrift struct: per-service install status across cluster nodes.

    Attributes:
     - service_name
     - service_version
     - exception_nodes
     - installed_nodes
    """

    def __init__(self, service_name=None, service_version=None, exception_nodes=None, installed_nodes=None,):
        self.service_name = service_name
        self.service_version = service_version
        self.exception_nodes = exception_nodes
        self.installed_nodes = installed_nodes

    def read(self, iprot):
        # Deserialize this struct from the protocol (Thrift-generated).
        # Delegate to the accelerated C decoder when available and thrift_spec is bound.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString(); decode to unicode there.
                    self.service_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.service_version = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.LIST:
                    self.exception_nodes = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in range(_size0):
                        _elem5 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.exception_nodes.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.LIST:
                    self.installed_nodes = []
                    (_etype9, _size6) = iprot.readListBegin()
                    for _i10 in range(_size6):
                        _elem11 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.installed_nodes.append(_elem11)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip so newer peers stay wire-compatible.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize this struct to the protocol; None fields are omitted.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ncTServiceInfos')
        if self.service_name is not None:
            oprot.writeFieldBegin('service_name', TType.STRING, 1)
            oprot.writeString(self.service_name.encode('utf-8') if sys.version_info[0] == 2 else self.service_name)
            oprot.writeFieldEnd()
        if self.service_version is not None:
            oprot.writeFieldBegin('service_version', TType.STRING, 2)
            oprot.writeString(self.service_version.encode('utf-8') if sys.version_info[0] == 2 else self.service_version)
            oprot.writeFieldEnd()
        if self.exception_nodes is not None:
            oprot.writeFieldBegin('exception_nodes', TType.LIST, 3)
            oprot.writeListBegin(TType.STRING, len(self.exception_nodes))
            for iter12 in self.exception_nodes:
                oprot.writeString(iter12.encode('utf-8') if sys.version_info[0] == 2 else iter12)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.installed_nodes is not None:
            oprot.writeFieldBegin('installed_nodes', TType.LIST, 4)
            oprot.writeListBegin(TType.STRING, len(self.installed_nodes))
            for iter13 in self.installed_nodes:
                oprot.writeString(iter13.encode('utf-8') if sys.version_info[0] == 2 else iter13)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Structural equality: same class and identical attribute dicts.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ncTPackageInfo(object):
    """
    Thrift struct: metadata about an uploaded service package
    (name, version, upload time, size, MD5 checksum, storage object id).

    Attributes:
     - service_name
     - package_name
     - package_version
     - upload_time
     - package_size
     - package_md5
     - object_id
    """

    def __init__(self, service_name=None, package_name=None, package_version=None, upload_time=None, package_size=None, package_md5=None, object_id=None,):
        self.service_name = service_name
        self.package_name = package_name
        self.package_version = package_version
        self.upload_time = upload_time
        self.package_size = package_size
        self.package_md5 = package_md5
        self.object_id = object_id

    def read(self, iprot):
        # Deserialize from the protocol; prefer the accelerated C decoder.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString(); decode to unicode there.
                    self.service_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.package_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.package_version = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I64:
                    self.upload_time = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I64:
                    self.package_size = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.package_md5 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRING:
                    self.object_id = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to the protocol; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ncTPackageInfo')
        if self.service_name is not None:
            oprot.writeFieldBegin('service_name', TType.STRING, 1)
            oprot.writeString(self.service_name.encode('utf-8') if sys.version_info[0] == 2 else self.service_name)
            oprot.writeFieldEnd()
        if self.package_name is not None:
            oprot.writeFieldBegin('package_name', TType.STRING, 2)
            oprot.writeString(self.package_name.encode('utf-8') if sys.version_info[0] == 2 else self.package_name)
            oprot.writeFieldEnd()
        if self.package_version is not None:
            oprot.writeFieldBegin('package_version', TType.STRING, 3)
            oprot.writeString(self.package_version.encode('utf-8') if sys.version_info[0] == 2 else self.package_version)
            oprot.writeFieldEnd()
        if self.upload_time is not None:
            oprot.writeFieldBegin('upload_time', TType.I64, 4)
            oprot.writeI64(self.upload_time)
            oprot.writeFieldEnd()
        if self.package_size is not None:
            oprot.writeFieldBegin('package_size', TType.I64, 5)
            oprot.writeI64(self.package_size)
            oprot.writeFieldEnd()
        if self.package_md5 is not None:
            oprot.writeFieldBegin('package_md5', TType.STRING, 6)
            oprot.writeString(self.package_md5.encode('utf-8') if sys.version_info[0] == 2 else self.package_md5)
            oprot.writeFieldEnd()
        if self.object_id is not None:
            oprot.writeFieldBegin('object_id', TType.STRING, 7)
            oprot.writeString(self.object_id.encode('utf-8') if sys.version_info[0] == 2 else self.object_id)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ncTMongoDBInfo(object):
    """
    Thrift struct: MongoDB connection endpoints (host list and port).

    Attributes:
     - hosts
     - port
    """

    def __init__(self, hosts=None, port=0,):
        # BUG FIX: the generated signature used a literal list default
        # (hosts=[]), which is a single object shared by every instance
        # constructed without arguments; the generated guard
        # `hosts is self.thrift_spec[1][4]` never matched that literal,
        # so appending to one instance's hosts mutated them all.
        # Use the standard None sentinel instead; callers that passed an
        # explicit list are unaffected.
        if hosts is None:
            hosts = []
        self.hosts = hosts
        self.port = port

    def read(self, iprot):
        # Deserialize from the protocol; prefer the accelerated C decoder.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    self.hosts = []
                    (_etype17, _size14) = iprot.readListBegin()
                    for _i18 in range(_size14):
                        # Python 2 returns bytes from readString(); decode there.
                        _elem19 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.hosts.append(_elem19)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.port = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to the protocol; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ncTMongoDBInfo')
        if self.hosts is not None:
            oprot.writeFieldBegin('hosts', TType.LIST, 1)
            oprot.writeListBegin(TType.STRING, len(self.hosts))
            for iter20 in self.hosts:
                oprot.writeString(iter20.encode('utf-8') if sys.version_info[0] == 2 else iter20)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.port is not None:
            oprot.writeFieldBegin('port', TType.I32, 2)
            oprot.writeI32(self.port)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class VipInfo(object):
    """
    Thrift struct: a virtual-IP definition (outer/inner VIP, netmask,
    NIC name, and the subsystem it belongs to — see ncTLVSSys).

    Attributes:
     - ovip
     - ivip
     - mask
     - nic
     - sys
    """

    # NOTE: the field name 'sys' shadows the 'sys' module inside __init__
    # only; __init__ never uses the module, so this is harmless, and the
    # read/write methods see the module as usual.
    def __init__(self, ovip="", ivip="", mask="", nic="", sys=1,):
        self.ovip = ovip
        self.ivip = ivip
        self.mask = mask
        self.nic = nic
        self.sys = sys

    def read(self, iprot):
        # Deserialize from the protocol; prefer the accelerated C decoder.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString(); decode there.
                    self.ovip = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.ivip = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.mask = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.nic = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I32:
                    self.sys = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to the protocol; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('VipInfo')
        if self.ovip is not None:
            oprot.writeFieldBegin('ovip', TType.STRING, 1)
            oprot.writeString(self.ovip.encode('utf-8') if sys.version_info[0] == 2 else self.ovip)
            oprot.writeFieldEnd()
        if self.ivip is not None:
            oprot.writeFieldBegin('ivip', TType.STRING, 2)
            oprot.writeString(self.ivip.encode('utf-8') if sys.version_info[0] == 2 else self.ivip)
            oprot.writeFieldEnd()
        if self.mask is not None:
            oprot.writeFieldBegin('mask', TType.STRING, 3)
            oprot.writeString(self.mask.encode('utf-8') if sys.version_info[0] == 2 else self.mask)
            oprot.writeFieldEnd()
        if self.nic is not None:
            oprot.writeFieldBegin('nic', TType.STRING, 4)
            oprot.writeString(self.nic.encode('utf-8') if sys.version_info[0] == 2 else self.nic)
            oprot.writeFieldEnd()
        if self.sys is not None:
            oprot.writeFieldBegin('sys', TType.I32, 5)
            oprot.writeI32(self.sys)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class HaNodeInfo(object):
    """
    Thrift struct: one node of an HA pair/group
    (uuid, IP, master flag, owning subsystem — see HaSys).

    Attributes:
     - node_uuid
     - node_ip
     - is_master
     - ha_sys
    """

    def __init__(self, node_uuid="", node_ip="", is_master=False, ha_sys=0,):
        self.node_uuid = node_uuid
        self.node_ip = node_ip
        self.is_master = is_master
        self.ha_sys = ha_sys

    def read(self, iprot):
        # Deserialize from the protocol; prefer the accelerated C decoder.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString(); decode there.
                    self.node_uuid = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.node_ip = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.BOOL:
                    self.is_master = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.ha_sys = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to the protocol; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('HaNodeInfo')
        if self.node_uuid is not None:
            oprot.writeFieldBegin('node_uuid', TType.STRING, 1)
            oprot.writeString(self.node_uuid.encode('utf-8') if sys.version_info[0] == 2 else self.node_uuid)
            oprot.writeFieldEnd()
        if self.node_ip is not None:
            oprot.writeFieldBegin('node_ip', TType.STRING, 2)
            oprot.writeString(self.node_ip.encode('utf-8') if sys.version_info[0] == 2 else self.node_ip)
            oprot.writeFieldEnd()
        if self.is_master is not None:
            oprot.writeFieldBegin('is_master', TType.BOOL, 3)
            oprot.writeBool(self.is_master)
            oprot.writeFieldEnd()
        if self.ha_sys is not None:
            oprot.writeFieldBegin('ha_sys', TType.I32, 4)
            oprot.writeI32(self.ha_sys)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ReleaseInfo(object):
    """
    Thrift struct: a single release identified by its name.

    Attributes:
     - release_name
    """

    def __init__(self, release_name="",):
        self.release_name = release_name

    def read(self, iprot):
        # Deserialize from the protocol; prefer the accelerated C decoder.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString(); decode there.
                    self.release_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to the protocol; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ReleaseInfo')
        if self.release_name is not None:
            oprot.writeFieldBegin('release_name', TType.STRING, 1)
            oprot.writeString(self.release_name.encode('utf-8') if sys.version_info[0] == 2 else self.release_name)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class CSNodeInfo(object):
    """
    Thrift struct: maps a CS node name to its node uuid.

    Attributes:
     - cs_node_name
     - node_uuid
    """

    def __init__(self, cs_node_name="", node_uuid="",):
        self.cs_node_name = cs_node_name
        self.node_uuid = node_uuid

    def read(self, iprot):
        # Deserialize from the protocol; prefer the accelerated C decoder.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString(); decode there.
                    self.cs_node_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.node_uuid = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to the protocol; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('CSNodeInfo')
        if self.cs_node_name is not None:
            oprot.writeFieldBegin('cs_node_name', TType.STRING, 1)
            oprot.writeString(self.cs_node_name.encode('utf-8') if sys.version_info[0] == 2 else self.cs_node_name)
            oprot.writeFieldEnd()
        if self.node_uuid is not None:
            oprot.writeFieldBegin('node_uuid', TType.STRING, 2)
            oprot.writeString(self.node_uuid.encode('utf-8') if sys.version_info[0] == 2 else self.node_uuid)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ContainerizedServiceInfo(object):
    """
    Thrift struct: status of a containerized service — available vs.
    installed version, hosting nodes, package name, and replica count.

    Attributes:
     - service_name
     - available_version
     - installed_version
     - nodes
     - available_package
     - replicas
    """

    def __init__(self, service_name="", available_version="", installed_version="", nodes=None, available_package="", replicas=0,):
        self.service_name = service_name
        self.available_version = available_version
        self.installed_version = installed_version
        # BUG FIX: the generated signature used a literal list default
        # (nodes=[]) shared by every default-constructed instance; the
        # generated guard `nodes is self.thrift_spec[4][4]` never matched
        # that literal, so mutations leaked across instances.  Use the
        # standard None sentinel instead.
        if nodes is None:
            nodes = []
        self.nodes = nodes
        self.available_package = available_package
        self.replicas = replicas

    def read(self, iprot):
        # Deserialize from the protocol; prefer the accelerated C decoder.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString(); decode there.
                    self.service_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.available_version = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.installed_version = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.LIST:
                    self.nodes = []
                    (_etype24, _size21) = iprot.readListBegin()
                    for _i25 in range(_size21):
                        _elem26 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.nodes.append(_elem26)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.available_package = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.I32:
                    self.replicas = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to the protocol; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ContainerizedServiceInfo')
        if self.service_name is not None:
            oprot.writeFieldBegin('service_name', TType.STRING, 1)
            oprot.writeString(self.service_name.encode('utf-8') if sys.version_info[0] == 2 else self.service_name)
            oprot.writeFieldEnd()
        if self.available_version is not None:
            oprot.writeFieldBegin('available_version', TType.STRING, 2)
            oprot.writeString(self.available_version.encode('utf-8') if sys.version_info[0] == 2 else self.available_version)
            oprot.writeFieldEnd()
        if self.installed_version is not None:
            oprot.writeFieldBegin('installed_version', TType.STRING, 3)
            oprot.writeString(self.installed_version.encode('utf-8') if sys.version_info[0] == 2 else self.installed_version)
            oprot.writeFieldEnd()
        if self.nodes is not None:
            oprot.writeFieldBegin('nodes', TType.LIST, 4)
            oprot.writeListBegin(TType.STRING, len(self.nodes))
            for iter27 in self.nodes:
                oprot.writeString(iter27.encode('utf-8') if sys.version_info[0] == 2 else iter27)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.available_package is not None:
            oprot.writeFieldBegin('available_package', TType.STRING, 5)
            oprot.writeString(self.available_package.encode('utf-8') if sys.version_info[0] == 2 else self.available_package)
            oprot.writeFieldEnd()
        if self.replicas is not None:
            oprot.writeFieldBegin('replicas', TType.I32, 6)
            oprot.writeI32(self.replicas)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ServiceConf(object):
    """
    Thrift struct: a service name plus the node IPs it is configured on.

    Attributes:
     - service_name
     - node_ips
    """

    def __init__(self, service_name="", node_ips=None,):
        self.service_name = service_name
        # BUG FIX: the generated signature used a literal list default
        # (node_ips=[]) shared by every default-constructed instance; the
        # generated guard `node_ips is self.thrift_spec[2][4]` never
        # matched that literal, so mutations leaked across instances.
        # Use the standard None sentinel instead.
        if node_ips is None:
            node_ips = []
        self.node_ips = node_ips

    def read(self, iprot):
        # Deserialize from the protocol; prefer the accelerated C decoder.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString(); decode there.
                    self.service_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.node_ips = []
                    (_etype31, _size28) = iprot.readListBegin()
                    for _i32 in range(_size28):
                        _elem33 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.node_ips.append(_elem33)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to the protocol; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ServiceConf')
        if self.service_name is not None:
            oprot.writeFieldBegin('service_name', TType.STRING, 1)
            oprot.writeString(self.service_name.encode('utf-8') if sys.version_info[0] == 2 else self.service_name)
            oprot.writeFieldEnd()
        if self.node_ips is not None:
            oprot.writeFieldBegin('node_ips', TType.LIST, 2)
            oprot.writeListBegin(TType.STRING, len(self.node_ips))
            for iter34 in self.node_ips:
                oprot.writeString(iter34.encode('utf-8') if sys.version_info[0] == 2 else iter34)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields declared in the IDL, so nothing to check.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# ---------------------------------------------------------------------------
# Runtime registration of per-struct wire metadata.  Each thrift_spec tuple is
# indexed by field id (slot 0 is unused) and holds:
#   (field id, wire type, field name, type args / string encoding, default)
# The accelerated codecs (_fast_encode / _fast_decode) consume these specs.
# ---------------------------------------------------------------------------
all_structs.append(ncTServiceInfos)
ncTServiceInfos.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'service_name', 'UTF8', None, ),  # 1
    (2, TType.STRING, 'service_version', 'UTF8', None, ),  # 2
    (3, TType.LIST, 'exception_nodes', (TType.STRING, 'UTF8', False), None, ),  # 3
    (4, TType.LIST, 'installed_nodes', (TType.STRING, 'UTF8', False), None, ),  # 4
)
all_structs.append(ncTPackageInfo)
ncTPackageInfo.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'service_name', 'UTF8', None, ),  # 1
    (2, TType.STRING, 'package_name', 'UTF8', None, ),  # 2
    (3, TType.STRING, 'package_version', 'UTF8', None, ),  # 3
    (4, TType.I64, 'upload_time', None, None, ),  # 4
    (5, TType.I64, 'package_size', None, None, ),  # 5
    (6, TType.STRING, 'package_md5', 'UTF8', None, ),  # 6
    (7, TType.STRING, 'object_id', 'UTF8', None, ),  # 7
)
all_structs.append(ncTMongoDBInfo)
ncTMongoDBInfo.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'hosts', (TType.STRING, 'UTF8', False), [
    ], ),  # 1
    (2, TType.I32, 'port', None, 0, ),  # 2
)
all_structs.append(VipInfo)
VipInfo.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'ovip', 'UTF8', "", ),  # 1
    (2, TType.STRING, 'ivip', 'UTF8', "", ),  # 2
    (3, TType.STRING, 'mask', 'UTF8', "", ),  # 3
    (4, TType.STRING, 'nic', 'UTF8', "", ),  # 4
    (5, TType.I32, 'sys', None, 1, ),  # 5
)
all_structs.append(HaNodeInfo)
HaNodeInfo.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'node_uuid', 'UTF8', "", ),  # 1
    (2, TType.STRING, 'node_ip', 'UTF8', "", ),  # 2
    (3, TType.BOOL, 'is_master', None, False, ),  # 3
    (4, TType.I32, 'ha_sys', None, 0, ),  # 4
)
all_structs.append(ReleaseInfo)
ReleaseInfo.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'release_name', 'UTF8', "", ),  # 1
)
all_structs.append(CSNodeInfo)
CSNodeInfo.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'cs_node_name', 'UTF8', "", ),  # 1
    (2, TType.STRING, 'node_uuid', 'UTF8', "", ),  # 2
)
all_structs.append(ContainerizedServiceInfo)
ContainerizedServiceInfo.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'service_name', 'UTF8', "", ),  # 1
    (2, TType.STRING, 'available_version', 'UTF8', "", ),  # 2
    (3, TType.STRING, 'installed_version', 'UTF8', "", ),  # 3
    (4, TType.LIST, 'nodes', (TType.STRING, 'UTF8', False), [
    ], ),  # 4
    (5, TType.STRING, 'available_package', 'UTF8', "", ),  # 5
    (6, TType.I32, 'replicas', None, 0, ),  # 6
)
all_structs.append(ServiceConf)
ServiceConf.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'service_name', 'UTF8', "", ),  # 1
    (2, TType.LIST, 'node_ips', (TType.STRING, 'UTF8', False), [
    ], ),  # 2
)
# Resolve forward/recursive references inside the specs, then drop the
# helper list so it does not leak into the module namespace.
fix_spec(all_structs)
del all_structs
| [
"gu.wenpeng@eisoo.com"
] | gu.wenpeng@eisoo.com |
10ffed599ac0d1038847cb9bd88e3bb7cfa7baa5 | 245381ad175dcc03ee0710964340eed4daa2ef85 | /shagroup/asgi.py | 68a2f8abe49c2e7dc7b2c96529a21083cda0f947 | [] | no_license | musabansari-1/Shagroup-erp-backend | 2c1f56f7ce5763dae668d160cdcc1a26dbc2e8d7 | 87845f11faae50301d5bb73ffa0c3ee0bed38256 | refs/heads/main | 2023-04-13T02:25:36.808755 | 2021-04-15T16:28:19 | 2021-04-15T16:28:19 | 358,324,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for shagroup project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Fall back to the project settings module when the environment does not
# already define DJANGO_SETTINGS_MODULE.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shagroup.settings')

# Module-level ASGI callable that ASGI servers import and run.
application = get_asgi_application()
| [
"musabzahida@gmail.com"
] | musabzahida@gmail.com |
a5e95a78db593a7a838f3c05604b0d18e3f5e2b0 | 6af6c7158609f889fa1f53c99b63c435113d496e | /RLTutorial/modelFreeValue.py | c57ba1bad60aeb8faff7827ff764ece01fdbbfe2 | [
"MIT"
] | permissive | fyabc/MSRAPaperProject | 170752a5b8bfdecbab876841762d8fd2f9732f08 | 2d7974acfe8065523d0c56da695807e94acd0b34 | refs/heads/master | 2020-04-06T08:03:08.211020 | 2016-09-07T10:28:11 | 2016-09-07T10:28:11 | 64,015,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | #! /usr/bin/python3
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals
from MDP import MDP
import version23
__author__ = 'fyabc'
def MonteCarlo(mdp, stateSamples, actionSamples, rewardSamples):
    """Every-visit Monte Carlo policy evaluation from sampled episodes.

    Estimates the state-value function by averaging, for every state, the
    discounted returns observed at each visit of that state.

    Args:
        mdp: object exposing `states` (iterable of states) and `gamma`
            (discount factor).
        stateSamples: list of episodes; each episode is a list of states.
        actionSamples: list of episodes of actions; unused here but kept
            for interface symmetry with other evaluation routines.
        rewardSamples: list of episodes of per-step rewards, aligned with
            stateSamples.

    Returns:
        dict mapping each state to its estimated value (0.0 if unvisited).
    """
    vFunc = {
        state: 0.0
        for state in mdp.states
    }
    nFunc = {
        state: 0
        for state in mdp.states
    }
    for i in range(len(stateSamples)):
        states = stateSamples[i]
        rewards = rewardSamples[i]
        # Compute the discounted return G_t for every step with a single
        # backward pass.  The original forward pass recovered G_{t+1} as
        # (G_t - r_t) / gamma, which raises ZeroDivisionError when
        # gamma == 0 and accumulates floating-point error; the backward
        # accumulation below is algebraically identical without division.
        returns = [0.0] * len(states)
        g = 0.0
        for step in range(len(states) - 1, -1, -1):
            g = rewards[step] + mdp.gamma * g
            returns[step] = g
        # Every-visit MC: each occurrence of a state contributes its return.
        for step in range(len(states)):
            state = states[step]
            vFunc[state] += returns[step]
            nFunc[state] += 1
    # Average the accumulated returns; unvisited states keep 0.0.
    for state in mdp.states:
        if nFunc[state] > 0:
            vFunc[state] /= nFunc[state]
    return vFunc
def temporalDifference(mdp, alpha, stateSamples, actionSamples, rewardSamples):
    """TD(0) policy evaluation from sampled episodes.

    Applies the tabular TD(0) update
        v(s) <- v(s) + alpha * (r + gamma * v(s') - v(s))
    for every transition of every episode, bootstrapping with v(s') = 0
    at the terminal step.

    Args:
        mdp: object exposing `states` and `gamma`.
        alpha: learning rate.
        stateSamples: list of episodes; each episode is a list of states.
        actionSamples: episodes of actions (unused, kept for interface
            symmetry).
        rewardSamples: episodes of per-step rewards aligned with states.

    Returns:
        dict mapping each state to its estimated value.
    """
    values = dict.fromkeys(mdp.states, 0.0)
    for episode_states, episode_rewards in zip(stateSamples, rewardSamples):
        last = len(episode_states) - 1
        for step, (state, reward) in enumerate(zip(episode_states, episode_rewards)):
            # Bootstrap from the successor state's value; terminal -> 0.
            bootstrap = values[episode_states[step + 1]] if step < last else 0.0
            values[state] += alpha * (reward + mdp.gamma * bootstrap - values[state])
    return values
def test():
    """Smoke-test MC and TD(0) evaluation on the random-walk MDP."""
    mdp = MDP(0.5)
    estimators = (
        ('Monte Carlo:', lambda: MonteCarlo(mdp, *mdp.randomWalkSamples(100))),
        ('Temporal Difference:', lambda: temporalDifference(mdp, 0.15, *mdp.randomWalkSamples(100))),
    )
    for title, run in estimators:
        # Draw a fresh batch of 100 random-walk episodes per estimator.
        vFunc = run()
        print(title)
        for state in range(1, 6):
            print('%d: %f\t' % (state, vFunc[state]), end='')
        print()
| [
"fyabc@mail.ustc.edu.cn"
] | fyabc@mail.ustc.edu.cn |
7c1155cf96173f5b9655062ea7bac26a067474ff | a192078ebd74d54db64d02d815d805b4e8d9a0c6 | /GeneratePAMLChemopartitions.py | 4396e3399c57c88109daaa45fc7437cb7a69175c | [] | no_license | rjovelin/CRM_POPVAR | ae1fe4b55345dd41b6c8ed4566a4ebce8b1ac437 | 9fe6d475f834b1bf9cfad26248d16dadb9c4f2ae | refs/heads/master | 2021-01-21T14:23:53.028058 | 2016-07-04T18:08:33 | 2016-07-04T18:08:33 | 57,171,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,729 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 30 18:38:26 2016
@author: Richard
"""
# use this script to generate PAML alignment files for chemoreceptor gene membrane partitions
from manipulate_sequences import *
from chemoreceptors import *
import os
# Minimum number of (gap-free) codons a partition must contain in the
# C. latens sequence for it to be written out for codeml.
MinimumCodons = 5
# get the set of chemoreceptors from the iprscan outputfile
chemo = get_chemoreceptors('../Genome_Files/PX356_protein_seq.tsv')
print('got chemo genes')
# create a set of valid transcripts
transcripts = get_valid_transcripts('../Genome_Files/unique_transcripts.txt')
print('got valid transcripts')
# create a set of valid chemoreceptors (chemoreceptors with a valid transcript)
GPCRs = set(gene for gene in chemo if gene in transcripts)
print('got valid GPCR genes')
# create a dict with the remanei CDS
# NOTE(review): CDS does not appear to be used anywhere below -- confirm
# before removing this (possibly expensive) FASTA parse.
CDS = convert_fasta('../Genome_Files/noamb_PX356_all_CDS.fasta')
print('got CDS sequences')
# create directories to store the aligned partitions
# NOTE(review): os.mkdir raises FileExistsError if the script is re-run with
# the directories already present; consider os.makedirs(..., exist_ok=True).
os.mkdir('Partitions')
os.mkdir('./Partitions/Membrane/')
os.mkdir('./Partitions/Extra_membrane/')
os.mkdir('./Partitions/Inside/')
os.mkdir('./Partitions/Outside/')
print('created directories')
# make a list of files in alignment directory
ali_files = os.listdir('../CREM_CLA_protein_divergence/pairs/Aligned_pairs/')
print('made a list of files')
# keep only the pairwise alignment files (suffix '_aln.tfa')
alignments = [filename for filename in ali_files if filename[-8:] == '_aln.tfa']
print('made a list of aligned sequence pairs')
# loop over genes in GPCRs
for gene in GPCRs:
# loop over alignment
for filename in alignments:
# check that gene in filename
if gene == filename[:filename.index('_CLA')]:
# get the aligned codons
codons = get_aligned_codons(filename, '../CREM_CLA_protein_divergence/pairs/Aligned_pairs/')
# get the dict of probabilities
probabilities = parse_phobius_output(gene + '_proba.txt', './Chemo_genes/')
# get a list of codon index
codon_index = [i for i in codons]
# sort list
codon_index.sort()
# get the list of amino acid index
aa_index = [i for i in probabilities]
# sort list
aa_index.sort()
# check that the list of index are the same
if aa_index != codon_index:
print(gene, codon_index, aa_index)
raise ValueError('Codon and AA index lists are different')
# create sequences to store the different partitions
crm_TM, crm_intra, crm_extra, crm_not_TM, cla_TM, cla_intra, cla_extra, cla_not_TM = '', '', '', '', '', '', '', ''
# loop over the aa_indices
for i in aa_index:
# check that sequences in each dict is the same
if probabilities[i][0] != cds_translate(codons[i][0]):
raise ValueError('Protein sequences in ortholog and probability dicts are different')
# check probabilities and build sequences
if probabilities[i][1] >= 0.95:
# build intra and not membrane sequences
crm_intra += codons[i][0]
cla_intra += codons[i][1]
crm_not_TM += codons[i][0]
cla_not_TM += codons[i][1]
elif probabilities[i][2] >= 0.95:
# build outside and not membrane sequences
crm_extra += codons[i][0]
cla_extra += codons[i][1]
crm_not_TM += codons[i][0]
cla_not_TM += codons[i][1]
elif probabilities[i][3] >= 0.95:
# build membrane sequences
crm_TM += codons[i][0]
cla_TM += codons[i][1]
elif probabilities[i][4] >= 0.95:
# build not_membrane sequences
crm_not_TM += codons[i][0]
cla_not_TM += codons[i][1]
# get cla_gene name
cla_gene = filename[filename.index('CLA'):filename.index('_aln')]
# check that remanei sequence is not empty and that latens sequence has minimum codons
if len(crm_TM) != 0 and len(crm_not_TM) != 0 and len(cla_TM.replace('-', '')) >= MinimumCodons and len(cla_not_TM.replace('-', '')) >= MinimumCodons:
# gene has both membrane and extra-membrane residues
# open file for writing
newfile = open('./Partitions/Membrane/' + gene + '_TM.txt', 'w')
# write alignment file in codeml input format
newfile.write('2' + ' ' + str(len(crm_TM)) + '\n')
newfile.write('>' + gene + '\n')
newfile.write(crm_TM + '\n')
newfile.write('>' + cla_gene + '\n')
newfile.write(cla_TM + '\n')
newfile.close()
# open file for writing
newfile = open('./Partitions/Extra_membrane/' + gene + '_ExtraTM.txt', 'w')
# write alignment file in codeml input format
newfile.write('2' + ' ' + str(len(crm_not_TM)) + '\n')
newfile.write('>' + gene + '\n')
newfile.write(crm_not_TM + '\n')
newfile.write('>' + cla_gene + '\n')
newfile.write(cla_not_TM + '\n')
newfile.close()
if len(crm_intra) != 0 and len(cla_intra.replace('-', '')) >= MinimumCodons:
# gene has intra-cellular domain
# open file for writing
newfile = open('./Partitions/Inside/' + gene + '_inside.txt', 'w')
# write alignment file in codeml input format
newfile.write('2' + ' ' + str(len(crm_intra)) + '\n')
newfile.write('>' + gene + '\n')
newfile.write(crm_intra + '\n')
newfile.write('>' + cla_gene + '\n')
newfile.write(cla_intra + '\n')
newfile.close()
if len(crm_extra) != 0 and len(cla_extra.replace('-', '')) >= MinimumCodons:
# gene has extra-cellular domain
# open file for writing
newfile = open('./Partitions/Outside/' + gene + '_outside.txt', 'w')
# write alignment file in codeml input format
newfile.write('2' + ' ' + str(len(crm_extra)) + '\n')
newfile.write('>' + gene + '\n')
newfile.write(crm_extra + '\n')
newfile.write('>' + cla_gene + '\n')
newfile.write(cla_extra + '\n')
newfile.close()
| [
"richard.jovelin@oicr.on.ca"
] | richard.jovelin@oicr.on.ca |
05e0c4be9187c69e133664eedd04e6dc36798554 | d3b77550a40b860970450e702b6bcd28d5f9b3e4 | /Hackerrank/problem_solving/implementation/Break_the_records.py | b150aeec27614733888d1b3cafb916de5e67b008 | [] | no_license | CateGitau/Python_programming | 47bc9277544814ad853b44a88f129713f1a40697 | 6ae42b3190134c4588ad785d62e08b0763cf6b3a | refs/heads/master | 2023-07-08T03:08:46.236063 | 2021-08-12T09:38:03 | 2021-08-12T09:38:03 | 228,712,021 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 09:07:41 2020
@author: aims
"""
scores = [17,45,41,60,17,41,76,43,51,40,89,92,34,6,64,7,37,81,32,50]
def breakingRecords(scores):
    """Count how often the running best and worst scores are broken.

    A record is broken only when a score is STRICTLY greater than the current
    maximum (or strictly lower than the current minimum); a tie is not a new
    record. The previous implementation counted a tie with the maximum as a
    broken record (its ``else`` branch fired on equality) and relied on
    zipping a list while appending to it; both issues are fixed here, and the
    leftover debug prints of the running high/low lists are removed.

    :param scores: non-empty sequence of game scores in chronological order
    :return: tuple ``(max_breaks, min_breaks)``
    """
    best = worst = scores[0]
    count_max = count_min = 0
    for score in scores[1:]:
        if score > best:        # strictly better than every previous score
            best = score
            count_max += 1
        elif score < worst:     # strictly worse than every previous score
            worst = score
            count_min += 1
    return (count_max, count_min)
print(breakingRecords(scores)) | [
"catherinegitau94@gmail.com"
] | catherinegitau94@gmail.com |
7b98411d65809d3254ba52dfd4f00395db75f254 | a5a4cee972e487512275c34f308251e6cc38c2fa | /pypospack/pyposmat/visualization/plot_2d_density_new.py | 06a52f350768e1a703822139dcea4ad5249f251b | [
"MIT"
] | permissive | eragasa/pypospack | 4f54983b33dcd2dce5b602bc243ea8ef22fee86b | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | refs/heads/master | 2021-06-16T09:24:11.633693 | 2019-12-06T16:54:02 | 2019-12-06T16:54:02 | 99,282,824 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,650 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from pypospack.pyposmat.visualization import PyposmatAbstractPlot
class Pyposmat2DDensityPlot(PyposmatAbstractPlot):
    """2D kernel-density / scatter plot of two columns of pyposmat results.

    The x and y columns may be any parameter or quantity-of-interest (QOI)
    name known to the configuration.  Axis limits are either supplied by the
    caller or estimated from a normal fit to the column (see
    ``determine_limits``).
    """

    # bandwidth-selection rules accepted by make_kde()'s ``h`` argument
    kde_bandwidth_types = ['silverman', 'silverman1986', 'chiu1999']

    def __init__(self, config=None, data=None):
        PyposmatAbstractPlot.__init__(self, config=config, data=data)
        self.x_limits = None  # (min, max) for the x axis, set lazily
        self.y_limits = None  # (min, max) for the y axis, set lazily

    def determine_limits(self, name, ppf_min=0.1, ppf_max=0.9):
        """Return (lo, hi) axis limits for column *name*.

        A normal distribution is fit to the column's mean and standard
        deviation and the limits are taken at the requested percent-point
        (inverse CDF) values.
        """
        assert name in self.configuration.qoi_names \
            or name in self.configuration.parameter_names
        assert isinstance(ppf_min, float)
        assert isinstance(ppf_max, float)
        norm_rv = stats.norm(
            loc=self.data.df[name].mean(),
            scale=self.data.df[name].std(),
        )
        return norm_rv.ppf(ppf_min), norm_rv.ppf(ppf_max)

    def determine_x_limits(self, x_name=None, x_limits=None, ppf_min=0.1, ppf_max=0.9):
        """Set and return self.x_limits (estimated unless *x_limits* is given)."""
        assert x_name is None or isinstance(x_name, str)
        # bug fix: the old code re-validated x_name here instead of x_limits
        assert x_limits is None or isinstance(x_limits, (list, tuple))
        assert isinstance(ppf_min, float)
        assert isinstance(ppf_max, float)
        if x_name is None:
            x_name = self.x_name
        if x_limits is None:
            # bug fix: forward the ppf bounds (previously silently dropped)
            self.x_limits = self.determine_limits(x_name, ppf_min, ppf_max)
        else:
            self.x_limits = x_limits
        return self.x_limits

    def determine_y_limits(self, y_name=None, y_limits=None, ppf_min=0.1, ppf_max=0.9):
        """Set and return self.y_limits (estimated unless *y_limits* is given)."""
        assert y_name is None or isinstance(y_name, str)
        # bug fix: the old code re-validated y_name here instead of y_limits
        assert y_limits is None or isinstance(y_limits, (list, tuple))
        assert isinstance(ppf_min, float)
        assert isinstance(ppf_max, float)
        if y_name is None:
            y_name = self.y_name
        if y_limits is None:
            # bug fix: forward the ppf bounds (previously silently dropped)
            self.y_limits = self.determine_limits(y_name, ppf_min, ppf_max)
        else:
            self.y_limits = y_limits
        return self.y_limits

    def plot(self,
             x_name, y_name,
             with_kde_plot=True,
             with_data_plot=True,
             x_limits=None, y_limits=None, h=None):
        """Render the KDE image and/or scatter of *y_name* vs *x_name*.

        :param h: optional KDE bandwidth rule, one of ``kde_bandwidth_types``
        """
        assert x_name in self.configuration.qoi_names \
            or x_name in self.configuration.parameter_names
        assert y_name in self.configuration.qoi_names \
            or y_name in self.configuration.parameter_names
        assert x_limits is None or isinstance(x_limits, list)
        assert y_limits is None or isinstance(y_limits, list)
        # bug fix: was the bare name kde_bandwidth_types (NameError at runtime)
        assert h is None or h in self.kde_bandwidth_types
        self.x_name = x_name
        self.y_name = y_name
        # bug fix: caller-supplied limits were previously ignored (the old
        # code called determine_*_limits() with no arguments)
        self.determine_x_limits(x_limits=x_limits)
        self.determine_y_limits(y_limits=y_limits)
        x = self.data.df[x_name].values
        y = self.data.df[y_name].values
        if self.fig is None or self.ax is None:
            self.create_subplots()
        if with_kde_plot:
            self.plot_kde(x, y, h)
        if with_data_plot:
            self.plot_data_points(x, y)
        self.ax.set_xlim(self.x_limits[0], self.x_limits[1])
        self.ax.set_ylim(self.y_limits[0], self.y_limits[1])
        # (dead code removed: a second, unused KDE was built here before)

    def plot_kde(self, x, y, h=None, XY_cmap_name='Blues'):
        """Evaluate the KDE on a 200x200 grid and draw it as an image."""
        xmin, xmax = self.x_limits
        ymin, ymax = self.y_limits
        X_density = 200j
        Y_density = 200j
        X, Y = np.mgrid[xmin:xmax:X_density, ymin:ymax:Y_density]
        XY_grid = np.vstack([X.ravel(), Y.ravel()])
        # evaluate density on the grid
        kde = self.make_kde(x, y, h)
        Z = np.reshape(kde(XY_grid), X.shape)
        aspectratio = (xmax - xmin) / (ymax - ymin)
        self.ax.imshow(
            np.rot90(Z),
            cmap=plt.get_cmap(XY_cmap_name),
            extent=[xmin, xmax, ymin, ymax],
            aspect=aspectratio)

    def plot_data_points(self, x, y, size=1):
        """Overlay the raw samples; *size* is the marker area in points^2."""
        # bug fix: the size argument was previously ignored (hard-coded s=1)
        self.ax.scatter(x, y, s=size)

    def make_kde(self, x, y, h=None):
        """Build a scipy gaussian_kde over (x, y) using bandwidth rule *h*.

        Fixes over the previous version: the bare ``kde_bandwidth_types``
        name (NameError), the ``h is 'chiu1999'`` identity comparison, and
        the misspelled ``value`` variable in the chiu1999 branch.
        """
        assert h is None or h in self.kde_bandwidth_types
        values = np.vstack([x, y])
        if h is None:
            return stats.gaussian_kde(values)
        if h in ('silverman', 'silverman1986'):
            return stats.gaussian_kde(values, 'silverman')
        if h == 'chiu1999':
            # NOTE(review): Chiu1999_h is not defined or imported in this
            # module -- confirm the package provides it before relying on
            # this branch.
            return stats.gaussian_kde(values, Chiu1999_h(values))
        raise ValueError(h)
| [
"eragasa@ufl.edu"
] | eragasa@ufl.edu |
9b6a8004b396ffea385ddfe4ebf79d7c40d5a1d2 | 1d8a4659d4a13cd8b0244918484990bb000687ea | /OpenPROD/openprod-addons/tracker/__openerp__.py | 4ea0f630e42a6909f66a9cb9ad244b53b60136b7 | [] | no_license | kazacube-mziouadi/ceci | d8218ede129186c26eb36f251ef42f07c7a74883 | eb394e1f79ba1995da2dcd81adfdd511c22caff9 | refs/heads/master | 2020-03-23T00:22:51.501409 | 2018-07-13T14:58:32 | 2018-07-13T14:58:32 | 140,859,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # -*- coding: utf-8 -*-
{
'name': 'Issue tracker',
'version': '1.1',
'category': 'Hidden/Dependency',
'license': 'LGPL',
'description': """""",
'author': 'Objectif-PI',
'website': '',
'depends': ['base_openprod'],
'data': [
'tracker_view.xml',
'data/sequence.xml',
'wizard/wizard_create_timetracking_view.xml',
'security/security.xml',
'security/ir.model.access.csv',
],
'demo': [],
'installable': True,
}
| [
"mziouadi@kazacube.com"
] | mziouadi@kazacube.com |
6a3f39ab36ccefaa00d25665ca1727ee346b7edb | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_vpn_site_links_operations.py | 265aa7a78b25683f6dc3911e16d959120a02b052 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 8,668 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnSiteLinksOperations(object):
"""VpnSiteLinksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
vpn_site_link_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.VpnSiteLink"
"""Retrieves the details of a VPN site link.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite.
:type vpn_site_name: str
:param vpn_site_link_name: The name of the VpnSiteLink being retrieved.
:type vpn_site_link_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnSiteLink, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.VpnSiteLink
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnSiteLink"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
'vpnSiteLinkName': self._serialize.url("vpn_site_link_name", vpn_site_link_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnSiteLink', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}/vpnSiteLinks/{vpnSiteLinkName}'} # type: ignore
def list_by_vpn_site(
self,
resource_group_name, # type: str
vpn_site_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ListVpnSiteLinksResult"]
"""Lists all the vpnSiteLinks in a resource group for a vpn site.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite.
:type vpn_site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSiteLinksResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.ListVpnSiteLinksResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ListVpnSiteLinksResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_site.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSiteLinksResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_vpn_site.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}/vpnSiteLinks'} # type: ignore
| [
"noreply@github.com"
] | paultaiton.noreply@github.com |
023134c6be263efb76d2c2ab006b16a5f881e11f | b251224f132b505f7c549f1fdca6b945cec4d7eb | /rotkehlchen/tests/test_poloniex.py | 0820785d556ffcde426b8137235f0db398bf87f6 | [
"BSD-3-Clause"
] | permissive | ltfschoen/rotkehlchen | 75a6ce58e484aeff87e335a6815cdae7fecd7eeb | 70b5cb79796718e86451ca4ef7116e26bfc19063 | refs/heads/master | 2020-07-02T09:20:36.118146 | 2019-08-08T21:12:37 | 2019-08-08T21:37:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,322 | py | import os
from unittest.mock import patch
import pytest
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import UNSUPPORTED_POLONIEX_ASSETS, asset_from_poloniex
from rotkehlchen.constants.assets import A_BTC, A_ETH
from rotkehlchen.errors import DeserializationError, UnsupportedAsset
from rotkehlchen.fval import FVal
from rotkehlchen.order_formatting import Loan, Trade, TradeType
from rotkehlchen.poloniex import Poloniex, process_polo_loans, trade_from_poloniex
from rotkehlchen.tests.utils.constants import A_DASH
from rotkehlchen.tests.utils.exchanges import POLONIEX_MOCK_DEPOSIT_WITHDRAWALS_RESPONSE
from rotkehlchen.tests.utils.mock import MockResponse
from rotkehlchen.typing import Timestamp
from rotkehlchen.user_messages import MessagesAggregator
TEST_RATE_STR = '0.00022999'
TEST_AMOUNT_STR = '613.79427133'
TEST_PERC_FEE_STR = '0.0015'
TEST_POLO_TRADE = {
'globalTradeID': 192167,
'tradeID': 3727,
'date': '2017-07-22 21:18:37',
'rate': TEST_RATE_STR,
'amount': TEST_AMOUNT_STR,
'total': '0.14116654',
'fee': TEST_PERC_FEE_STR,
'orderNumber': '2315432',
'type': 'sell',
'category': 'exchange',
}
TEST_POLO_LOAN_1 = {
'id': 3, # we don't read that in Rotkehlchen
'rate': '0.001', # we don't read that in Rotkehlchen
'duration': '0.001', # we don't read that in Rotkehlchen
'interest': '0.00000005', # we don't read that in Rotkehlchen
'open': '2017-01-24 06:05:04',
'close': '2017-01-24 10:05:04',
'currency': 'DASH', # cryptocompare hourly DASH/EUR: 13.22106438
'fee': '0.00015',
'earned': '0.003',
'amount': '2',
}
TEST_POLO_LOAN_2 = {
'id': 4, # we don't read that in Rotkehlchen
'rate': '0.001', # we don't read that in Rotkehlchen
'duration': '0.001', # we don't read that in Rotkehlchen
'interest': '0.00000005', # we don't read that in Rotkehlchen
'open': '2017-02-13 19:07:01',
'close': '2017-02-13 23:05:04',
'currency': 'DASH', # cryptocompare hourly DASH/EUR: 15.73995672
'fee': '0.00011',
'earned': '0.0035',
'amount': '2',
}
def test_trade_from_poloniex():
    """A raw poloniex trade dict converts into a correctly populated Trade."""
    amount = FVal(TEST_AMOUNT_STR)
    rate = FVal(TEST_RATE_STR)
    perc_fee = FVal(TEST_PERC_FEE_STR)
    cost = amount * rate
    trade = trade_from_poloniex(TEST_POLO_TRADE, 'BTC_ETH')
    assert isinstance(trade, Trade)
    assert isinstance(trade.timestamp, int)
    # Compare all remaining attributes in one shot against the expected tuple
    observed = (
        trade.timestamp,
        trade.trade_type,
        trade.rate,
        trade.amount,
        trade.pair,
        trade.fee,
        trade.fee_currency,
        trade.location,
    )
    expected = (
        1500758317,
        TradeType.SELL,
        rate,
        amount,
        'ETH_BTC',
        cost * perc_fee,
        'BTC',
        'poloniex',
    )
    assert observed == expected
def test_poloniex_trade_deserialization_errors():
    """Malformed fields in a poloniex trade dict raise DeserializationError.

    Refactored from five near-identical copy-pasted blocks into one
    data-driven loop over (field, bad_value) pairs; the missing-field case
    is kept separate because it deletes rather than mutates a key.
    """
    bad_field_values = [
        ('date', '2017/07/22 1:18:37'),  # wrong timestamp format
        ('type', 'lololol'),             # unknown trade type
        ('amount', None),                # non-numeric amount
        ('rate', None),                  # non-numeric rate
        ('fee', ['a']),                  # non-numeric fee
    ]
    for field, bad_value in bad_field_values:
        test_trade = TEST_POLO_TRADE.copy()
        test_trade[field] = bad_value
        with pytest.raises(DeserializationError):
            trade_from_poloniex(test_trade, 'BTC_ETH')
    # A missing required field must also be reported as a deserialization error
    test_trade = TEST_POLO_TRADE.copy()
    del test_trade['rate']
    with pytest.raises(DeserializationError):
        trade_from_poloniex(test_trade, 'BTC_ETH')
def test_process_polo_loans():
    """Two well-formed poloniex loans deserialize fully; the start/end
    timestamps filter loans by their close time."""
    raw_data = [TEST_POLO_LOAN_1, TEST_POLO_LOAN_2]
    msg_aggregator = MessagesAggregator()
    loans = process_polo_loans(msg_aggregator, raw_data, 0, 1564262858)
    assert len(loans) == 2
    # first loan: every field of TEST_POLO_LOAN_1 that rotkehlchen reads
    assert isinstance(loans[0], Loan)
    assert loans[0].open_time == Timestamp(1485237904)
    assert loans[0].close_time == Timestamp(1485252304)
    assert isinstance(loans[0].currency, Asset)
    assert loans[0].currency == A_DASH
    assert loans[0].fee == FVal('0.00015')
    assert loans[0].earned == FVal('0.003')
    assert loans[0].amount_lent == FVal('2')
    # second loan: every field of TEST_POLO_LOAN_2 that rotkehlchen reads
    assert isinstance(loans[1], Loan)
    assert loans[1].open_time == Timestamp(1487012821)
    assert loans[1].close_time == Timestamp(1487027104)
    assert isinstance(loans[1].currency, Asset)
    assert loans[1].currency == A_DASH
    assert loans[1].fee == FVal('0.00011')
    assert loans[1].earned == FVal('0.0035')
    assert loans[1].amount_lent == FVal('2')
    # Test different start/end timestamps
    # start just after loan 1's close time -> only loan 2 remains
    loans = process_polo_loans(msg_aggregator, raw_data, 1485252305, 1564262858)
    assert len(loans) == 1
    assert loans[0].close_time == Timestamp(1487027104)
    # end just before loan 2's open time -> only loan 1 remains
    loans = process_polo_loans(msg_aggregator, raw_data, 0, 1487012820)
    assert len(loans) == 1
    assert loans[0].close_time == Timestamp(1485252304)
def test_process_polo_loans_unexpected_data():
    """Test that with unexpected data the offending loan is skipped and an error generated.

    Refactored from six near-identical copy-pasted blocks into one helper
    plus a data-driven loop; the assertions are unchanged.
    """
    msg_aggregator = MessagesAggregator()

    def assert_broken_loan_skipped(broken_loan):
        # The broken loan must be dropped, the valid one kept, and exactly
        # one error queued in the aggregator.
        loans = process_polo_loans(msg_aggregator, [broken_loan, TEST_POLO_LOAN_2], 0, 1564262858)
        assert len(loans) == 1
        assert loans[0].close_time == Timestamp(1487027104)
        assert len(msg_aggregator.consume_errors()) == 1

    bad_field_values = [
        ('close', 'xx2017-xxs07-22 21:18:37'),  # unparseable close timestamp
        ('open', 'xx2017-xxs07-22 21:18:37'),   # unparseable open timestamp
        ('fee', 'sdad'),                        # non-numeric fee
        ('earned', None),                       # non-numeric earned
        ('amount', ['something']),              # non-numeric amount
    ]
    for field, bad_value in bad_field_values:
        broken_loan = TEST_POLO_LOAN_1.copy()
        broken_loan[field] = bad_value
        assert_broken_loan_skipped(broken_loan)
    # And finally test that missing an expected entry is also handled
    broken_loan = TEST_POLO_LOAN_1.copy()
    del broken_loan['amount']
    assert_broken_loan_skipped(broken_loan)
def test_poloniex_trade_with_asset_needing_conversion():
    """A poloniex symbol that collides with a different rotkehlchen asset is
    remapped (AIR -> AIR-2, as the pair assertion below shows)."""
    amount = FVal(613.79427133)
    rate = FVal(0.00022999)
    perc_fee = FVal(0.0015)
    poloniex_trade = {
        'globalTradeID': 192167,
        'tradeID': FVal(3727.0),
        'date': '2017-07-22 21:18:37',
        'rate': rate,
        'amount': amount,
        'total': FVal(0.14116654),
        'fee': perc_fee,
        'orderNumber': FVal(2315432.0),
        'type': 'sell',
        'category': 'exchange',
    }
    trade = trade_from_poloniex(poloniex_trade, 'AIR_BTC')
    assert trade.pair == 'BTC_AIR-2'
    assert trade.location == 'poloniex'
def test_query_trade_history_not_shared_cache(data_dir):
    """Test that having 2 different poloniex instances does not use same cache

    Regression test for https://github.com/rotkehlchenio/rotkehlchen/issues/232
    We are using poloniex as an example here. Essentially tests all exchange caches.
    """
    # Two stub trade histories that return distinguishable payloads, so a
    # shared cache would make the second query return the first payload.
    def first_trades(currency_pair, start, end): # pylint: disable=unused-argument
        return {'BTC': [{'data': 1}]}
    def second_trades(currency_pair, start, end): # pylint: disable=unused-argument
        return {'BTC': [{'data': 2}]}
    messages_aggregator = MessagesAggregator()
    end_ts = 99999999999
    # each instance gets its own user directory (and thus its own cache file)
    first_user_dir = os.path.join(data_dir, 'first')
    os.mkdir(first_user_dir)
    second_user_dir = os.path.join(data_dir, 'second')
    os.mkdir(second_user_dir)
    a = Poloniex(b'', b'', first_user_dir, messages_aggregator)
    with patch.object(a, 'return_trade_history', side_effect=first_trades):
        result1 = a.query_trade_history(0, end_ts, end_ts)
    b = Poloniex(b'', b'', second_user_dir, messages_aggregator)
    with patch.object(b, 'return_trade_history', side_effect=second_trades):
        result2 = b.query_trade_history(0, end_ts, end_ts)
    # each instance must have seen its own stub data, not the other's cache
    assert result1['BTC'][0]['data'] == 1
    assert result2['BTC'][0]['data'] == 2
def test_poloniex_assets_are_known(poloniex):
    """Every asset symbol poloniex lists must either map to a known rotkehlchen
    Asset or be explicitly whitelisted as unsupported."""
    for symbol in poloniex.return_currencies():
        try:
            asset_from_poloniex(symbol)
        except UnsupportedAsset:
            assert symbol in UNSUPPORTED_POLONIEX_ASSETS
@pytest.mark.parametrize('use_clean_caching_directory', [True])
def test_poloniex_query_balances_unknown_asset(function_scope_poloniex):
    """Test that if a poloniex balance query returns unknown asset no exception
    is raised and a warning is generated. Same for unsupported assets"""
    poloniex = function_scope_poloniex
    # Canned balance response: two known assets plus one unknown symbol and
    # one unsupported symbol that must both be skipped with a warning.
    def mock_unknown_asset_return(url, req):  # pylint: disable=unused-argument
        response = MockResponse(
            200,
            """{
            "BTC": {"available": "5.0", "onOrders": "0.5"},
            "ETH": {"available": "10.0", "onOrders": "1.0"},
            "IDONTEXIST": {"available": "1.0", "onOrders": "2.0"},
            "CNOTE": {"available": "2.0", "onOrders": "3.0"}
            }""")
        return response
    with patch.object(poloniex.session, 'post', side_effect=mock_unknown_asset_return):
        # Test that after querying the assets only ETH and BTC are there
        balances, msg = poloniex.query_balances()
        assert msg == ''
        assert len(balances) == 2
        # amounts are available + onOrders
        assert balances[A_BTC]['amount'] == FVal('5.5')
        assert balances[A_ETH]['amount'] == FVal('11.0')
        warnings = poloniex.msg_aggregator.consume_warnings()
        assert len(warnings) == 2
        assert 'unknown poloniex asset IDONTEXIST' in warnings[0]
        assert 'unsupported poloniex asset CNOTE' in warnings[1]
@pytest.mark.parametrize('use_clean_caching_directory', [True])
def test_poloniex_deposits_withdrawal_unknown_asset(function_scope_poloniex):
    """Test that if a poloniex asset movement query returns unknown asset no exception
    is raised and a warning is generated. Same for unsupported assets.

    Refactored: the repeated per-movement attribute assertions are replaced
    by a single expected-values table; the assertions themselves are
    unchanged (deposit fees were not checked before and still are not).
    """
    poloniex = function_scope_poloniex

    def mock_api_return(url, req):  # pylint: disable=unused-argument
        return MockResponse(200, POLONIEX_MOCK_DEPOSIT_WITHDRAWALS_RESPONSE)

    with patch.object(poloniex.session, 'post', side_effect=mock_api_return):
        # Test that after querying the api only ETH and BTC assets are there
        asset_movements = poloniex.query_deposits_withdrawals(
            start_ts=0,
            end_ts=1488994442,
            end_at_least_ts=1488994442,
        )
    # (category, timestamp, asset, amount, fee); fee None means "not checked"
    expected_movements = [
        ('withdrawal', 1458994442, A_BTC, FVal('5.0'), FVal('0.5')),
        ('withdrawal', 1468994442, A_ETH, FVal('10.0'), FVal('0.1')),
        ('deposit', 1448994442, A_BTC, FVal('50.0'), None),
        ('deposit', 1438994442, A_ETH, FVal('100.0'), None),
    ]
    assert len(asset_movements) == len(expected_movements)
    for movement, (category, timestamp, asset, amount, fee) in zip(
            asset_movements, expected_movements,
    ):
        assert movement.category == category
        assert movement.timestamp == timestamp
        assert movement.asset == asset
        assert movement.amount == amount
        if fee is not None:
            assert movement.fee == fee
    warnings = poloniex.msg_aggregator.consume_warnings()
    assert len(warnings) == 4
    assert 'Found withdrawal of unknown poloniex asset IDONTEXIST' in warnings[0]
    assert 'Found withdrawal of unsupported poloniex asset DIS' in warnings[1]
    assert 'Found deposit of unknown poloniex asset IDONTEXIST' in warnings[2]
    assert 'Found deposit of unsupported poloniex asset EBT' in warnings[3]
@pytest.mark.parametrize('use_clean_caching_directory', [True])
def test_poloniex_deposits_withdrawal_null_fee(function_scope_poloniex):
    """
    Test that if a poloniex asset movement query returns null for fee we don't crash.
    Regression test for issue #76
    """
    poloniex = function_scope_poloniex
    # Canned response with a JSON null fee on the single withdrawal.
    def mock_api_return(url, req):  # pylint: disable=unused-argument
        response = MockResponse(
            200,
            '{"withdrawals": [{"currency": "FAC", "timestamp": 1478994442, '
            '"amount": "100.5", "fee": null}], "deposits": []}',
        )
        return response
    with patch.object(poloniex.session, 'post', side_effect=mock_api_return):
        asset_movements = poloniex.query_deposits_withdrawals(
            start_ts=0,
            end_ts=1488994442,
            end_at_least_ts=1488994442,
        )
        assert len(asset_movements) == 1
        assert asset_movements[0].category == 'withdrawal'
        assert asset_movements[0].timestamp == 1478994442
        # FAC is mapped to rotkehlchen's FAIR asset, per the assertion below
        assert asset_movements[0].asset == Asset('FAIR')
        assert asset_movements[0].amount == FVal('100.5')
        # a null fee must be normalized to zero rather than crashing
        assert asset_movements[0].fee == FVal('0')
        warnings = poloniex.msg_aggregator.consume_warnings()
        assert len(warnings) == 0
| [
"lefteris@refu.co"
] | lefteris@refu.co |
6a8b9e4ede272b664d0f795040983a39b0abec0f | d7016f69993570a1c55974582cda899ff70907ec | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2022_06_01/aio/_configuration.py | 432effa58c39c7f1021a6c274d2906adcb47ac1b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 3,545 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MonitorManagementClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for MonitorManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2022-06-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super(MonitorManagementClientConfiguration, self).__init__(**kwargs)
        # Pop so the remaining kwargs can be forwarded untouched to _configure.
        api_version = kwargs.pop("api_version", "2022-06-01")  # type: str

        # Fail fast on the two required parameters.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        # Default AAD scope for ARM; callers may override via kwargs.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-monitor/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        # Build the HTTP pipeline policies, honoring any caller-supplied
        # overrides and falling back to the azure-core defaults.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Only synthesize an auth policy when a credential exists and the
        # caller did not provide their own policy.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
c26f65a8b4765d23a71a577bf9fdc2a895991866 | f411c70d2951f1744b7c5c47433593c5a1288f4d | /old_version_mysite (wegen bilder)-1a566232fd31fbf4038539de112513e859373364/blog/migrations/0009_auto_20200107_2049.py | f5e7596ffe53a3763b471b0fadf11444bb79f228 | [] | no_license | nevergofullretard/technikmax-website | b239c6cd378c196ab97c6141dd345434db795888 | 57c0ab44fc73bccc097df5d7003aaa38125e5413 | refs/heads/master | 2023-01-05T04:49:02.600988 | 2020-11-06T22:26:53 | 2020-11-06T22:26:53 | 249,065,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # Generated by Django 2.1.2 on 2020-01-07 19:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0008_auto_20200107_2040'),
]
operations = [
migrations.AlterField(
model_name='project',
name='description',
field=models.CharField(max_length=1000),
),
migrations.AlterField(
model_name='project',
name='github',
field=models.CharField(blank=True, max_length=200),
),
migrations.AlterField(
model_name='project',
name='title',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='project',
name='title_tag',
field=models.CharField(max_length=100),
),
]
| [
"jagermaxi1@gmail.com"
] | jagermaxi1@gmail.com |
5b70f0ee6f386b9458baaa5b140f0b18bed0f90b | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_6/models/file_info.py | 23b64849799a5104d64bb70df45f389369dbffa6 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,301 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.6, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_6 import models
class FileInfo(object):
    """
    Swagger-codegen generated model for a FlashBlade file entry.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'length': 'int'
    }
    attribute_map = {
        'name': 'name',
        'length': 'length'
    }
    required_args = {
    }

    def __init__(
        self,
        name=None,  # type: str
        length=None,  # type: int
    ):
        """
        Keyword args:
            name (str): Name of the object (e.g., a file system or snapshot).
            length (int): Length of the file (in bytes).
        """
        # Only set attributes that were explicitly provided; __setattr__
        # below rejects anything outside attribute_map.
        if name is not None:
            self.name = name
        if length is not None:
            self.length = length

    def __setattr__(self, key, value):
        # Restrict attributes to the declared model fields.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `FileInfo`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unset fields hold Property placeholders (from ....properties);
        # present them to callers as None.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            return None
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                # Recurse into nested models inside lists/dicts as well.
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated boilerplate: only relevant if the model subclassed dict.
        if issubclass(FileInfo, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FileInfo):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
cbe0e65929a0840a5b46f3c6711671225d2c19c2 | 58dcd83b6af6e947328519e3e8e43e7e07dce1da | /tasks/ptf.py | ec7bac864be4098bec8c90868c7c6594d2265149 | [
"MIT"
] | permissive | astrocatalogs/kilonovae | ad10ba93b5c9676edb0ccf983d8ff770d4de7808 | 887742fdfc26a291c61056bbb3a420370c377584 | refs/heads/master | 2021-01-21T22:25:39.526071 | 2018-06-29T00:36:58 | 2018-06-29T00:36:58 | 102,157,542 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,887 | py | """Import tasks for the Palomar Transient Factory (PTF).
"""
import os
from astrocats.catalog.utils import is_number, pbar
from bs4 import BeautifulSoup
from ..kilonova import KILONOVA
def do_ptf(catalog):
    """Import PTF/iPTF events into the catalog.

    Three sources, in order: the WISeREP spectra-update page (entry
    aliases), the local old-ptf-events.csv (bare entries), and the
    Perley 2016 SLSN table (positions, types, redshifts, dates, EBV).
    """
    # response =
    # urllib.request.urlopen('http://wiserep.weizmann.ac.il/objects/list')
    # bs = BeautifulSoup(response, 'html5lib')
    # select = bs.find('select', {'name': 'objid'})
    # options = select.findAll('option')
    # for option in options:
    #     print(option.text)
    #     name = option.text
    #     if ((name.startswith('PTF') and is_number(name[3:5])) or
    #             name.startswith('PTFS') or name.startswith('iPTF')):
    #         name = catalog.add_entry(name)
    task_str = catalog.get_current_task_str()
    # Cached copy of the WISeREP update page (load_url handles caching).
    html = catalog.load_url('http://wiserep.weizmann.ac.il/spectra/update',
                            os.path.join(catalog.get_current_task_repo(),
                                         'PTF/update.html'))
    bs = BeautifulSoup(html, 'html5lib')
    select = bs.find('select', {'name': 'objid'})
    options = select.findAll('option')
    for option in pbar(options, task_str):
        name = option.text
        # Keep only PTF/PTFS/iPTF designations (PTF must be followed by a
        # two-digit year).
        if (((name.startswith('PTF') and is_number(name[3:5])) or
             name.startswith('PTFS') or name.startswith('iPTF'))):
            # "PTFxx (SNyyyy)" style entries: the part in parentheses is
            # the primary name, the PTF designation becomes an alias.
            if '(' in name:
                alias = name.split('(')[0].strip(' ')
                name = name.split('(')[-1].strip(') ').replace('sn', 'SN')
                if name == 'SNiauname':  # A misentered entry
                    continue
                name, source = catalog.new_entry(
                    name, bibcode='2012PASP..124..668Y')
                catalog.entries[name].add_quantity(KILONOVA.ALIAS, alias,
                                                   source)
            else:
                # name = catalog.add_entry(name)
                name, source = catalog.new_entry(
                    name, bibcode='2012PASP..124..668Y')
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'PTF/old-ptf-events.csv')) as f:
        for suffix in pbar(f.read().splitlines(), task_str):
            name = catalog.add_entry('PTF' + suffix)
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'PTF/perly-2016.csv')) as f:
        for row in pbar(f.read().splitlines(), task_str):
            cols = [x.strip() for x in row.split(',')]
            alias = ''
            # Column 8, when present, carries the preferred (SN) name.
            if cols[8]:
                name = cols[8]
                alias = 'PTF' + cols[0]
            else:
                name = 'PTF' + cols[0]
            name = catalog.add_entry(name)
            source = catalog.entries[name].add_source(
                bibcode='2016ApJ...830...13P')
            catalog.entries[name].add_quantity(KILONOVA.ALIAS, name, source)
            if alias:
                catalog.entries[name].add_quantity(KILONOVA.ALIAS, alias,
                                                   source)
            catalog.entries[name].add_quantity(KILONOVA.RA, cols[1], source)
            catalog.entries[name].add_quantity(KILONOVA.DEC, cols[2], source)
            catalog.entries[name].add_quantity(KILONOVA.CLAIMED_TYPE,
                                               'SLSN-' + cols[3], source)
            catalog.entries[name].add_quantity(
                KILONOVA.REDSHIFT, cols[4], source, kind='spectroscopic')
            # A leading '<' on the max date marks an upper limit.
            maxdate = cols[6].replace('-', '/')
            upl = maxdate.startswith('<')
            catalog.entries[name].add_quantity(
                KILONOVA.MAX_DATE,
                maxdate.lstrip('<'),
                source,
                upperlimit=upl)
            catalog.entries[name].add_quantity(
                KILONOVA.EBV, cols[7], source, kind='spectroscopic')
    # NOTE(review): this re-adds an entry for the *last* suffix of the
    # old-ptf-events loop above — looks redundant/leftover; confirm intent.
    name = catalog.add_entry('PTF' + suffix)
    catalog.journal_entries()
    return
| [
"guillochon@gmail.com"
] | guillochon@gmail.com |
443af4141f7802b5d3e978997b9dac8822173592 | 892dd32ee0be7135cd33c875b06dcc66307dcc99 | /automation/MPTS/backup/Accounts.py | 6fe7e7d76954193b0d4e99c8c2a8a0ba288108da | [] | no_license | cloudbytestorage/devops | 6d21ed0afd752bdde8cefa448d4433b435493ffa | b18193b08ba3d6538277ba48253c29d6a96b0b4a | refs/heads/master | 2020-05-29T08:48:34.489204 | 2018-01-03T09:28:53 | 2018-01-03T09:28:53 | 68,889,307 | 4 | 8 | null | 2017-11-30T08:11:39 | 2016-09-22T05:53:44 | Python | UTF-8 | Python | false | false | 1,866 | py | import json
import sys
import time
from cbrequest import sendrequest, filesave, timetrack, queryAsyncJobResult, configFile
# NOTE: Python 2 script (print statements). Builds CloudByte accounts and a
# CIFS auth group per account via the REST API described in cbrequest.
config = configFile(sys.argv);
# Base API URL; each request appends its own "command=..." query string.
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])

######## To Add an Account for TSM -- Begins
print "Account Creation Begins"
timetrack("Account Creation Begins")
# One iteration per account in the config; keys are 1-based
# (accountName1, accountDescription1, ...).
for x in range(1, int(config['Number_of_Accounts'])+1):
    querycommand = 'command=createAccount&name=%s&description=%s' %(config['accountName%d' %(x)], config['accountDescription%d' %(x)])
    resp_createAccount=sendrequest(stdurl,querycommand)
    # NOTE(review): mode "w" rewrites the log each iteration — presumably
    # only the last response is kept; confirm append was not intended.
    filesave("logs/AccountCreation.txt","w",resp_createAccount)
    data = json.loads(resp_createAccount.text)
    if not 'errorcode' in data['createaccountresponse']:
        print "%s is created" %(config['accountName%d' %(x)])
        account_id=data["createaccountresponse"]["account2"]["id"]
        #creating Account User Authentication
        name = "%sAUTH" %(config['accountName%d' %(x)])
        user = "%suser" %(config['accountName%d' %(x)])
        # Password intentionally mirrors the generated user name.
        password = user
        time.sleep(2);
        querycommand ='command=addCIFSAuthGroup&accountid=%s&name=%s&comment=%s&username=%s&password=%s&fullname=%s' %(account_id, name,"Comment",user,password,"fullname")
        resp_tsmcifsauthgroupresponse=sendrequest(stdurl,querycommand)
        filesave("logs/AccountUserCreation.txt","w",resp_tsmcifsauthgroupresponse)
        data = json.loads(resp_tsmcifsauthgroupresponse.text)
        if not "errortext" in data["tsmcifsauthgroupresponse"]:
            print "%s created" %(name)
        else:
            print "Error in creating %s : %s" %(name,data["tsmcifsauthgroupresponse"]["errortext"])
        time.sleep(2);
    else:
        print "Error in creating %s : %s " %(config['accountName%d' %(x)],str(data['createaccountresponse']['errortext']))
| [
"karthik.s@cloudbyte.com"
] | karthik.s@cloudbyte.com |
1a543e20fb95abcd577d9003d76182a50bd8bce2 | f5ab9e3d4119bee183bf8f8bd8fb2f3fea755fc5 | /backend/home/migrations/0002_load_initial_data.py | d4bb919a45ea61c2c8b90e58cf799aa726e7d3a9 | [] | no_license | crowdbotics-apps/nyayo-wallet-18168 | ac0ad9e481a81055ef499b41eb86f88448675bd7 | 6b9c874b3176713c800dc623b3cb366cc504c585 | refs/heads/master | 2022-11-05T23:37:52.818433 | 2020-06-17T19:52:20 | 2020-06-17T19:52:20 | 273,065,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the CustomText table with the application title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="Nyayo Wallet")
def create_homepage(apps, schema_editor):
    # Seed the HomePage table with the default landing-page markup.
    # The HTML below is a runtime string — left exactly as generated.
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
<h1 class="display-4 text-center">Nyayo Wallet</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record at the deployed domain."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "nyayo-wallet-18168.botics.co"

    defaults = {"name": "Nyayo Wallet"}
    if custom_domain:
        defaults["domain"] = custom_domain

    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration seeding initial CustomText, HomePage and Site rows.

    NOTE: no reverse_code is supplied, so this migration is irreversible.
    """

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2fc4d12557de8db0b15da6c67806ef018129e119 | 6271171cbfab3e2b195b73cb936428adc6a1ca96 | /virtual/bin/pip3 | 8033ed37aafbc34b36d8141d1db9b65fd90cd886 | [] | no_license | UmuhireAnuarithe/Neighborhood | 7592f24d0f11ec77d7f46a92cdb9dced5e66dd11 | 60072d868433e38145b74cbe1cee06e16bf58266 | refs/heads/master | 2020-09-04T11:34:29.819137 | 2019-11-07T19:19:30 | 2019-11-07T19:19:30 | 218,758,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | #!/home/wecode/Desktop/Umuhire/Neighborhood/virtual/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: runs pip from this virtualenv.
import re
import sys

from pip._internal.main import main

if __name__ == '__main__':
    # Strip a trailing "-script.pyw"/".exe" from argv[0] so pip reports a
    # clean program name (relevant mainly to Windows launcher stubs).
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"you@example.com"
] | you@example.com | |
a0323ac561a7a50abb306f498d8afa48cd00566e | d7fb8743b6faa4d948b2b08ca0dbdd3b0f11379b | /测试代码/theano/LSTMVRAE-master/VRAE.py | c61d81f248ad9a86b1b74adf9f6bfaf207556f16 | [] | no_license | bancheng/Stock-market | 219e9882858e6d10edad1d13fba67dadbedc27ba | 142ea0eaed0fdccd8e79a51c34d66d1be1c336ed | refs/heads/master | 2021-01-20T15:13:14.667022 | 2017-09-10T06:31:10 | 2017-09-10T06:31:10 | 90,737,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,947 | py | import numpy as np
from chainer import Variable, Chain
from chainer import functions as F
class LSTMVRAE(Chain):
    """
    Class: LSTMVRAE
    ===============
    Implements Variational Recurrent Autoencoders, described here: http://arxiv.org/pdf/1412.6581.pdf
    This specific architecture uses a single-layer LSTM for both the encoder and the decoder.
    """

    def __init__(self, n_input, n_hidden, n_latent, loss_func):
        """
        :param n_input: number of input dimensions
        :param n_hidden: number of LSTM cells for both generator and decoder
        :param n_latent: number of dimensions for latent code (z)
        :param loss_func: loss function to compute reconstruction error (e.g. F.mean_squared_error)
        """
        # Stores all constructor args as attributes in one shot.
        self.__dict__.update(locals())

        # Linear layers output n_hidden*4 because chainer's F.lstm expects
        # the four gate pre-activations concatenated.
        super(LSTMVRAE, self).__init__(
            # Encoder (recognition):
            recog_x_h=F.Linear(n_input, n_hidden*4),
            recog_h_h=F.Linear(n_hidden, n_hidden*4),
            recog_mean=F.Linear(n_hidden, n_latent),
            recog_log_sigma=F.Linear(n_hidden, n_latent),

            # Decoder (generation)
            gen_z_h=F.Linear(n_latent, n_hidden*4),
            gen_x_h=F.Linear(n_input, n_hidden*4),
            gen_h_h=F.Linear(n_hidden, n_hidden*4),
            output=F.Linear(n_hidden, n_input)
        )

    def make_initial_state(self):
        """Returns an initial state of the RNN - all zeros (batch size 1)."""
        return {
            'h_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),
            'c_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),
            'h_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),
            'c_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32))
        }

    def forward(self, x_data, state):
        """
        Does encode/decode on x_data.
        :param x_data: input sequence as a numpy.ndarray; axis 0 iterates
            timesteps (assumed shape (timesteps, n_input) — TODO confirm)
        :param state: previous state of RNN (dict from make_initial_state)
        :return: output, recognition loss, KL Divergence, state
        """
        #=====[ Step 1: Compute q(z|x) - encoding step, get z ]=====

        # Forward encoding: run the recognition LSTM over the sequence.
        for i in range(x_data.shape[0]):
            x = Variable(x_data[i].reshape((1, x_data.shape[1])))
            h_in = self.recog_x_h(x) + self.recog_h_h(state['h_rec'])
            c_t, h_t = F.lstm(state['c_rec'], h_in)
            state.update({'c_rec':c_t, 'h_rec':h_t})

        # Compute q_mean and q_log_sigma from the final encoder state.
        q_mean = self.recog_mean(state['h_rec'])
        q_log_sigma = 0.5 * self.recog_log_sigma(state['h_rec'])

        # Compute KL divergence based on q_mean and q_log_sigma.
        KLD = -0.0005 * F.sum(1 + q_log_sigma - q_mean**2 - F.exp(q_log_sigma))

        # Reparameterization trick: z = q_mean + noise*exp(q_log_sigma).
        eps = Variable(np.random.normal(0, 1, q_log_sigma.data.shape ).astype(np.float32))
        z = q_mean + F.exp(q_log_sigma) * eps

        #=====[ Step 2: Compute p(x|z) - decoding step ]=====

        # Initial decoder step is driven by the latent code z.
        output = []
        h_in = self.gen_z_h(z)
        c_t, h_t = F.lstm(state['c_gen'], h_in)
        state.update({'c_gen':c_t, 'h_gen':h_t})
        rec_loss = Variable(np.zeros((), dtype=np.float32))
        for i in range(x_data.shape[0]):
            # Get output and accumulate reconstruction loss vs. the input.
            x_t = self.output(h_t)
            output.append(x_t.data)
            rec_loss += self.loss_func(x_t, Variable(x_data[i].reshape((1, x_data.shape[1]))))

            # Get next hidden state; decoder feeds its own output back in.
            h_in = self.gen_x_h(x_t) + self.gen_h_h(state['h_gen'])
            c_t, h_t = F.lstm(state['c_gen'], h_in)
            state.update({'c_gen':c_t, 'h_gen':h_t})

        #=====[ Step 3: Compute KL-Divergence based on all terms ]=====
        return output, rec_loss, KLD, state
| [
"tangdongge@buaa.edu.cn"
] | tangdongge@buaa.edu.cn |
28c55dd0c36cd14490b30996420bde8006459891 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1809541/homework02/program02.py | 219171cdc7d415d3f2ab468fa1f3b181bae13566 | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | '''
Un file di compiti contiene informazioni su un insieme di compiti da eseguire.
Esistono due tipologie di compiti:
- compiti che possono essere eseguiti indipendentemente dagli altri.
- compiti da svolgere solo al termine di un compito preliminare.
I compiti del primo tipo sono codificati nel file mediante una linea che contiene
in sequenza le due sottostringhe "comp" ed "N" (senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "N" e' l'ID del compito (un numero positivo).
Compiti del secondo tipo sono codificati nel file mediante due linee di codice.
-- la prima linea, contiene in sequenza le due sottostringhe "comp" ed "N"
(senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "N" e' l'ID del compito (un numero positivo).
-- la seconda linea (immediatamente successiva nel file) contiene
in sequenza le due sottostringhe "sub" ed "M" (senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "M" e' l'ID del compito preliminare.
il seguente file di compiti contiene informazioni su 4 compiti (con identificativi 1,3,7 e 9).
I compiti con identificativi 1 e 9 possono essere svolti indipendentemente dagli altri mentre i compiti
con identificativo 3 e 7 hanno entrambi un compito preliminare.
comp 3
sub 9
comp1
comp 9
comp 7
sub3
Scrivere la funzione pianifica(fcompiti,insi,fout) che prende in input:
- il percorso di un file (fcompiti)
- un insieme di ID di compiti da cercare (insi)
- ed il percorso di un file (fout)
e che salva in formato JSON nel file fout un dizionario (risultato).
Il dizionario (risultato) dovra' contenere come chiavi gli identificativi (ID) dei compiti
presenti in fcompiti e richiesti nell'insieme insi.
Associata ad ogni ID x del dizionario deve esserci una lista contenente gli identificativi (ID) dei compiti
che bisogna eseguire prima di poter eseguire il compito x richiesto
(ovviamente la lista di un ID di un compito che non richie un compito preliminare risultera' vuota ).
Gli (ID) devono comparire nella lista nell'ordine di esecuzione corretto, dal primo fino a quello precedente a quello richiesto
(ovviamente il primo ID di una lista non vuota corripondera' sempre ad un compito che non richiede un compito preliminare).
Si puo' assumere che:
- se l' ID di un compito che richieda un compito preliminare e' presente in fcompiti
allora anche l'ID di quest'ultimo e' presente in fcompiti
- la sequenza da associare al compito ID del dizionario esiste sempre
- non esistono cicli (compiti che richiedono se' stessi anche indirettamente)
Ad esempio per il file di compiti fcompiti contenente:
comp 3
sub 9
comp1
comp 9
comp 7
sub3
al termine dell'esecuzione di pianifica(fcompiti,{'7','1','5'}, 'a.json')
il file 'a.json' deve contenere il seguente dizionario
{'7':['9','3'],'1':[]}
Per altri esempi vedere il file grade02.txt
AVVERTENZE:
non usare caratteri non ASCII, come le lettere accentate;
non usare moduli che non sono nella libreria standard.
NOTA: l'encoding del file e' 'utf-8'
ATTENZIONE: Se un test del grader non termina entro 10 secondi il punteggio di quel test e' zero.
'''
# Only pianifica is the graded entry point; `test` is an internal helper
# (hidden from star-imports so it is not mistaken for a unit test).
__all__ = ["pianifica"]


def test(diz, insi):
    """Build the prerequisite chains for the requested task IDs.

    :param diz: dict mapping task ID -> ID of its prerequisite task
        ('' when the task has no prerequisite)
    :param insi: set of task IDs to look up (left unmodified)
    :return: dict mapping each requested ID found in diz to the list of
        prerequisite IDs in execution order (first task first)
    """
    result = {}
    for task in insi:
        if task not in diz:
            # Requested ID not present in the tasks file: skip it.
            continue
        chain = []
        cur = task
        # Walk the prerequisite links until a task with no prerequisite.
        while cur in diz and diz[cur] != '':
            chain.append(diz[cur])
            cur = diz[cur]
        # Collected from the task backwards; reverse into execution order.
        chain.reverse()
        result[task] = chain
    return result


def pianifica(fcompiti, insi, fout):
    """Parse the tasks file and save the requested plans as JSON.

    :param fcompiti: path of the tasks file ("comp N" lines optionally
        followed by a "sub M" line naming the prerequisite)
    :param insi: set of task IDs to plan for
    :param fout: path of the JSON output file

    Fixes over the original: files are closed via `with` (the input file
    handle used to leak) and both files use the utf-8 encoding mandated
    by the assignment.
    """
    from json import dump

    diz = {}
    key = None
    with open(fcompiti, 'r', encoding='utf-8') as lines:
        for line in lines:
            # Spaces may appear anywhere around/inside the keywords.
            line = line.replace(' ', '').replace('\n', '')
            if not line:
                continue
            if 'sub' not in line:
                # "comp N" line: register task N with no prerequisite yet.
                key = line.replace('comp', '')
                diz[key] = ''
            else:
                # "sub M" line: M is the prerequisite of the previous task.
                diz[key] += line.replace('sub', '')

    risultato = test(diz, insi)
    with open(fout, 'w', encoding='utf-8') as js:
        dump(risultato, js)
js.close()
| [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
bd207e42fac6fcf7bc3bd69478759816700352a8 | 0c35b8b69fe899bf510826c52ab3171443acdaf3 | /sportshack/predictor/migrations/0001_initial.py | d15192eb788edc952ded02a54ed439eeb4969c95 | [
"MIT"
] | permissive | vaastav/SportsHack | c37c8e9b315142091ad9dbe6b50268880421a69d | 6d20d1abcb1d72659607c08e4a9aafc291162c58 | refs/heads/master | 2020-02-26T17:12:24.526099 | 2016-03-12T21:50:54 | 2016-03-12T21:50:54 | 47,007,457 | 0 | 0 | null | 2016-03-12T21:50:55 | 2015-11-28T02:44:14 | null | UTF-8 | Python | false | false | 3,843 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated initial schema for the predictor app.

    NOTE(review): the Game.date default below is a frozen timestamp baked
    in at makemigrations time (generated from datetime.now()); presumably
    not intended as a meaningful default — confirm before reuse.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('game_id', models.IntegerField()),
                ('home_team', models.CharField(max_length=255)),
                ('away_team', models.CharField(max_length=255)),
                ('home_score', models.IntegerField()),
                ('away_score', models.IntegerField()),
                ('home_qt1', models.IntegerField()),
                ('home_qt2', models.IntegerField()),
                ('home_qt3', models.IntegerField()),
                ('home_qt4', models.IntegerField()),
                ('away_qt1', models.IntegerField()),
                ('away_qt2', models.IntegerField()),
                ('away_qt3', models.IntegerField()),
                ('away_qt4', models.IntegerField()),
                ('date', models.DateTimeField(blank=True, default=datetime.datetime(2015, 11, 29, 7, 18, 13, 49051))),
            ],
        ),
        migrations.CreateModel(
            name='Play',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('type_id', models.CharField(max_length=255)),
                ('success', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('team', models.CharField(max_length=255)),
                ('touchdown', models.IntegerField()),
                ('points', models.IntegerField()),
                ('fumbles', models.IntegerField()),
                ('height', models.DecimalField(decimal_places=7, max_digits=10)),
                ('weight', models.IntegerField()),
                ('birthplace', models.CharField(max_length=255)),
                ('position', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Predictions',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('award_points', models.IntegerField()),
                ('play', models.ForeignKey(to='predictor.Play')),
            ],
        ),
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('win', models.IntegerField()),
                ('loss', models.IntegerField()),
                ('points', models.IntegerField()),
                ('points_scored', models.IntegerField()),
                ('points_conceded', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('points', models.IntegerField()),
                ('num_votes', models.IntegerField()),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)),
            ],
        ),
    ]
| [
"vaastav.anand05@gmail.com"
] | vaastav.anand05@gmail.com |
7191f92baf553381ff048a6940f844b83fc097a7 | ca064338a35104cc94c60b330fc9b60efee6a089 | /cabunicrisis/comparativegenomics/FastANI/FastANI_output_to_distance_matrix.py | 733ec67fa6482e409befb0df111fba5a6c1b4022 | [] | no_license | compgenomics2020/Team2-WebServer | 76d9c5e9ac01be84da2b19ee8c7cfcb088e24911 | 0e550cc523e93afd4417af688a1e9ada79ae489f | refs/heads/master | 2022-06-23T08:16:11.751133 | 2020-05-02T14:00:43 | 2020-05-02T14:00:43 | 263,474,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,910 | py | #!/usr/bin/env python3
"""
Copyright 2019 by Jianshu Zhao (jansuechao@hotmail.com)
This script uses FastANI output to generate a PHYLIP distance matrix suitable for tree/headmap et.al.
"""
import argparse
import sys
def get_arguments():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Distance matrix from pairwise identities')
    parser.add_argument(
        'identities', type=str,
        help='FastANI output file (or similarly formatted file with three '
             'whitespace-delimited columns of assembly 1, assembly 2, percent '
             'identity')
    parser.add_argument(
        '--max_dist', type=float, required=False, default=1.0,
        help='Maximum allowed genomic distance')
    return parser.parse_args()
def main():
    """Read FastANI pairwise output and print a PHYLIP matrix to stdout."""
    args = get_arguments()
    clusters = set()
    distances = {}

    # Progress/diagnostics go to stderr so stdout stays a clean matrix.
    print('', file=sys.stderr)
    print('Convert FastANI distances to PHYLIP matrix', file=sys.stderr)
    print('------------------------------------------------', file=sys.stderr)
    fastani_output_filename = args.identities
    with open(fastani_output_filename, 'rt') as fastani_output:
        for line in fastani_output:
            parts = line.strip().split()
            cluster_1 = parts[0]
            cluster_2 = parts[1]
            ani = float(parts[2])
            # NOTE(review): "distance" here is ANI/100 (a similarity, higher
            # = more alike), with self-pairs forced to 1.0 — confirm the
            # downstream tree/heatmap tooling expects this convention.
            if cluster_1 == cluster_2:
                distance = 1.0
            else:
                distance = ani / 100.0
            clusters.add(cluster_1)
            clusters.add(cluster_2)
            # Store both orientations; add_distance averages duplicates.
            add_distance(distances, cluster_1, cluster_2, distance)
            add_distance(distances, cluster_2, cluster_1, distance)
    print('Found {} clusters and {} distances'.format(len(clusters), len(distances)),
          file=sys.stderr)

    # PHYLIP header: taxon count, then one tab-separated row per taxon.
    print(len(clusters))
    clusters = sorted(clusters)
    for i in clusters:
        print(i, end='')
        for j in clusters:
            print('\t', end='')
            # Missing pairs (FastANI omits very dissimilar genomes) fall
            # back to the configured maximum distance.
            try:
                distance = distances[(i, j)]
            except KeyError:
                distance = args.max_dist
            if distance > args.max_dist:
                distance = args.max_dist
            print('%.6f' % distance, end='')
        print()
    print('', file=sys.stderr)
def add_distance(distances, cluster_1, cluster_2, distance):
    """Record a pairwise value, averaging with any previously stored one.

    A pair can be seen twice (once per FastANI direction); the second
    sighting is sanity-checked against the first and the mean is kept.
    """
    pair = (cluster_1, cluster_2)
    if pair not in distances:
        distances[pair] = distance
        return
    previous = distances[pair]
    # The two directions of the same pair should agree closely.
    assert abs(distance - previous) < 0.1
    distances[pair] = (previous + distance) / 2.0
# Run the converter only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| [
"github-noreply@oit.gatech.edu"
] | github-noreply@oit.gatech.edu |
5090607a5ef39fea84b21da06b8f38aeeea43fb4 | 13d013cd5481ad47d48cf87750647f08a9c630ed | /melons.py | b5545fdc3526f8ce157ea918c5c488daa28dd367 | [] | no_license | marjanasarker/oo-melons | 7878de32f9ead132770c77570ae1410a7fe29ed3 | a75fcce861b1e4ea47896921f2fbc221d875764f | refs/heads/master | 2023-03-21T11:01:13.996561 | 2021-03-12T21:00:08 | 2021-03-12T21:00:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,021 | py | """Classes for melon orders."""
class AbstractMelonOrder():
    """An abstract base class that other Melon Orders inherit from.

    Subclasses are expected to define the class attributes ``tax`` (a tax
    rate such as 0.08) and ``order_type`` (e.g. "domestic" or
    "international"); both are read by ``get_total``.
    """

    def __init__(self, species, qty):
        # Melon species name and number of melons ordered.
        self.species = species
        self.qty = qty

    def get_total(self):
        """Calculate price, including tax."""

        base_price = 5
        # Christmas melons cost 1.5x the base price.
        # NOTE(review): assumes callers pass the species exactly as
        # "ChristmasMelon" -- confirm against call sites.
        if self.species == "ChristmasMelon":
            base_price *= 1.5
        # Small international orders (< 10 melons) carry a $3-per-melon
        # surcharge.  Compared case-insensitively: subclasses set the
        # lowercase "international", while this check previously compared
        # against "International" and therefore never fired.
        if self.order_type.lower() == "international" and self.qty < 10:
            base_price += 3
        total = (1 + self.tax) * self.qty * base_price
        return total

    def mark_shipped(self):
        """Record the fact that an order has been shipped."""
        self.shipped = True
class DomesticMelonOrder(AbstractMelonOrder):
    """A melon order shipped within the USA."""

    # Domestic orders are taxed at 8%.
    tax = 0.08
    order_type = 'domestic'

    def __init__(self, species, qty):
        """Initialize a domestic order; it starts out unshipped."""
        super().__init__(species, qty)
        self.shipped = False
class InternationalMelonOrder(AbstractMelonOrder):
    """An international (non-US) melon order."""

    # International orders are taxed at 17%.
    tax = 0.17
    order_type = "international"

    def __init__(self, species, qty, country_code):
        """Initialize an international order with its destination country."""
        super().__init__(species, qty)
        self.country_code = country_code
        self.shipped = False

    def get_country_code(self):
        """Return the destination country code."""
        return self.country_code
class GovernmentMelonOrder(AbstractMelonOrder):
    """A U.S. Government tax-free melon order.

    Government orders pay no tax and must pass a melon inspection; the
    inspection status is tracked on ``passed_inspection``.
    """

    tax = 0
    passed_inspection = False
    order_type = "Government"

    def __init__(self, species, qty):
        super().__init__(species, qty)
        # Per-instance inspection flag.  The original code assigned
        # ``self.marked_inspection = False`` here, which shadowed the
        # ``marked_inspection`` method and made it uncallable on instances.
        self.passed_inspection = False
        # Start out unshipped, consistent with the other order classes.
        self.shipped = False

    def marked_inspection(self):
        """Record that this order's melons passed inspection."""
        # The original body compared ``self`` (an order object) to the
        # string "passed" -- always False -- and then assigned a throwaway
        # local variable instead of the attribute.
        self.passed_inspection = True
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
87b129553779da9bda36a4a63c210538154e7ef6 | 2fa4007849c1ec0f9c009ba536887001217b7f9f | /sgkit_plink/tests/test_pysnptools.py | ac5df4ddbc65a1abad0d9c88df7aa98d4361d3b7 | [
"Apache-2.0"
] | permissive | eric-czech/sgkit-plink | 219195318836dcfbfbb6de9b720ef6f3e183f7be | 4f10c1b0b260fa80fe18c31d28a434c175e558cd | refs/heads/master | 2022-12-08T18:28:09.111583 | 2020-08-04T13:49:56 | 2020-08-04T13:49:56 | 276,471,757 | 0 | 0 | null | 2020-07-01T20:06:50 | 2020-07-01T20:06:50 | null | UTF-8 | Python | false | false | 3,100 | py | import numpy as np
import pytest
import xarray as xr
from sgkit_plink.pysnptools import read_plink
example_dataset_1 = "plink_sim_10s_100v_10pmiss"
@pytest.fixture(params=[dict()])
def ds1(shared_datadir, request):
    # Load the shared example PLINK dataset.  ``request.param`` lets tests
    # override read_plink keyword arguments via indirect parametrization.
    path = shared_datadir / example_dataset_1
    return read_plink(path=path, bim_sep="\t", fam_sep="\t", **request.param)
def test_read_multi_path(shared_datadir, ds1):
    # Reading via explicit bed/bim/fam paths must produce the same dataset
    # as reading via the single shared ``path`` prefix.
    path = shared_datadir / example_dataset_1
    ds2 = read_plink(
        bed_path=path.with_suffix(".bed"),
        bim_path=path.with_suffix(".bim"),
        fam_path=path.with_suffix(".fam"),
        bim_sep="\t",
        fam_sep="\t",
    )
    xr.testing.assert_equal(ds1, ds2)
def test_raise_on_both_path_types():
    """Passing both ``path`` and an individual file path must fail fast."""
    expected = (
        "Either `path` or all 3 of `{bed,bim,fam}_path` "
        "must be specified but not both"
    )
    with pytest.raises(ValueError, match=expected):
        read_plink(path="x", bed_path="x")
def test_read_slicing(ds1):
    """Slicing the genotype array must preserve the untouched dimensions."""
    genotypes = ds1["call_genotype"]
    full = genotypes.shape
    assert genotypes[:3].shape == (3,) + full[1:]
    assert genotypes[:, :3].shape == full[:1] + (3,) + full[2:]
    assert genotypes[:3, :5].shape == (3, 5) + full[2:]
    assert genotypes[:3, :5, :1].shape == (3, 5, 1)
@pytest.mark.parametrize("ds1", [dict(bim_int_contig=True)], indirect=True)
def test_read_int_contig(ds1):
    # With bim_int_contig=True the contig is parsed directly as an integer
    # (the contig value is always "1" in the .bed file for ds1).
    assert np.all(ds1["variant_contig"].values == 1)
    assert ds1.attrs["contigs"] == ["1"]
@pytest.mark.parametrize("ds1", [dict(bim_int_contig=False)], indirect=True)
def test_read_str_contig(ds1):
    # With bim_int_contig=False the contig is indexed as a string, so the
    # stored value is the index 0 into the "contigs" attribute list.
    assert np.all(ds1["variant_contig"].values == 0)
    assert ds1.attrs["contigs"] == ["1"]
def test_read_call_values(ds1):
    """Spot-check randomly selected genotype calls covering every call state."""
    # (variant, sample) coordinates paired with the expected allele calls,
    # spanning homozygous, heterozygous and missing (-1) genotypes.
    cases = [
        ((50, 7), (1, 0)),
        ((81, 8), (1, 0)),
        ((45, 2), (1, 1)),
        ((36, 8), (1, 1)),
        ((24, 2), (-1, -1)),
        ((92, 9), (0, 0)),
        ((26, 2), (0, 0)),
        ((81, 0), (1, 1)),
        ((31, 8), (0, 0)),
        ((4, 9), (0, 0)),
    ]
    idx = np.array([coord for coord, _ in cases])
    expected = np.array([call for _, call in cases])
    gt = ds1["call_genotype"].values
    actual = gt[tuple(idx.T)]
    np.testing.assert_equal(actual, expected)
def test_read_stat_call_rate(ds1):
    # Validate the call rate per sample: a call counts when either allele is
    # non-missing (>= 0); the rate is the mean over all variants.
    sample_call_rates = (
        (ds1["call_genotype"] >= 0).max(dim="ploidy").mean(dim="variants").values
    )
    np.testing.assert_equal(
        sample_call_rates, [0.95, 0.9, 0.91, 0.87, 0.86, 0.83, 0.86, 0.87, 0.92, 0.92]
    )
def test_read_stat_alt_alleles(ds1):
    # Validate the alt-allele total per sample: clip(0, 2) maps missing
    # calls (-1) to 0 before summing over ploidy and variants.
    n_alt_alleles = (
        ds1["call_genotype"].clip(0, 2).sum(dim="ploidy").sum(dim="variants").values
    )
    np.testing.assert_equal(n_alt_alleles, [102, 95, 98, 94, 88, 91, 90, 98, 96, 103])
| [
"eric.allen.czech@gmail.com"
] | eric.allen.czech@gmail.com |
46b4d533b36599eb8e094a265a17059b93efaa3b | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/dwd.py | 503f139a6ae2d1ae779cbfb84e9ff0c633842534 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the tokens enclosed between a leading and a trailing '"' token,
    # joined by single spaces; a bare pair of quote tokens prints an empty
    # line.  Tokens not wrapped in quotes are silently ignored.
    # NOTE: Python 2 print statements -- this script is Python 2 only.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Strip the surrounding quote tokens before printing.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret each line of the input file: lines whose first token is the
    # keyword 'dWD' have their remaining tokens handed to printFunction;
    # any other line prints 'ERROR' and aborts processing.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'dWD':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
# Entry point: the first CLI argument is the path of the file to interpret.
if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
7ef1801849d89765bd4444252fe2be738977662f | fe9d6ff3d7a39bb57a6ed7a973c0318d3a7aa189 | /chapters5/exe5_14.py | 30417bf07dc5fd973a3261b80db35c60467c31c2 | [] | no_license | yiguming/python_core_progreamming | 12cfca8b44b187a706c7dd7e1bb73ab3ef1f7552 | 9b7790938f33523c0cd4172b0d508e49bbddf17a | refs/heads/master | 2020-03-29T16:27:55.888601 | 2015-04-19T03:09:39 | 2015-04-19T03:09:39 | 31,845,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | #!/usr/bin/env python
def huibao(basemoney,rate):
    # Report the money after a year at the given rate and the resulting
    # multiple ("huibaolv" = rate of return).
    # NOTE(review): ``basemoney*(1+rate)*365`` scales linearly by 365 rather
    # than compounding -- this looks suspicious for a return-rate exercise;
    # confirm the intended formula before relying on the output.
    totalmoney = basemoney*(1+rate)*365
    huibao = totalmoney / float(basemoney)
    print "The basemoney %.2f ,after a year it will be change %.2f ,and the huibaolv is %f"%(basemoney,totalmoney,huibao)
# Demo run with a principal of 100.00 and a rate of 0.25.
if __name__ == "__main__":
    huibao(100.00,0.25)
| [
"439309415@qq.com"
] | 439309415@qq.com |
1067841ff7b2255d7deb7766bb7e37d66f3416ec | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/Wprime/Wprime_WZ_WlepZhad_narrow_M3000_13TeV-madgraph_cff.py | 5ea1fd9178d90686f424118336088b009a602725 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 769 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/91ab3ea30e3c2280e4c31fdd7072a47eb2e5bdaa/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-1/Wprime_WZ_WlepZhad/Wprime_WZ_WlepZhad_narrow_M3000
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-1/Wprime_WZ_WlepZhad/narrow/v2/Wprime_WZ_WlepZhad_narrow_M3000_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"sheffield@physics.rutgers.edu"
] | sheffield@physics.rutgers.edu |
323a93c9c5d16e668db2cef2445cd7fe25a23394 | 91ac6c1be33810c98cb49a18318603bcab1ff37d | /temp.py | 3d64b4e0a5fcfbe0c0c16eb54fdfc8bd8eb7b5bd | [] | no_license | antonylu/serial | 492a1e61cc65e9c32f7e49ff1cf1a2281b6c8045 | 141986c3c8a911d9560ab133e1e7488627094f71 | refs/heads/master | 2022-11-13T00:07:18.333268 | 2020-06-30T12:53:33 | 2020-06-30T12:53:33 | 275,778,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | py | import serial
import pygame
import json
BG_L_IMG = "bg.jpg"
BG_P_IMG = "bgp.jpg"
OPTION_JSON = "options.json"
class Temp():
    """Full-screen temperature display fed by a serial line.

    Reads ASCII temperature readings (one per line) from the serial port
    configured in options.json and renders them with pygame; readings above
    HIGH_TEMP are drawn in red and trigger an audible alarm.
    """

    def __init__(self):
        pygame.init()
        pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
        self._surface = pygame.display.get_surface()
        options = json.load(open(OPTION_JSON))
        self.s = serial.Serial(options['COM_PORT'])
        self.high_temp = float(options['HIGH_TEMP'])
        # Only run the display loop if the serial port opened successfully.
        self._running = True if self.s else False
        self.w, self.h = self._surface.get_size()
        self.portrait_mode = True if self.w < self.h else False
        # Pick background image, text position and font size per orientation.
        if self.portrait_mode:
            self.bg = pygame.image.load(BG_P_IMG)
            self.text_center = (int(self.w/2), 590)
            self.font = pygame.font.SysFont('consolas', 360)
        else: # landscape mode
            self.bg = pygame.image.load(BG_L_IMG)
            self.text_center = (int(self.w/2), int(self.h/2)+50)
            self.font = pygame.font.SysFont('consolas', 512)
        pygame.mouse.set_visible(False)
        self.alarm = pygame.mixer.Sound('balarm.wav')

    def run(self):
        # Main loop: poll events, read one temperature line, redraw.
        # Note: each iteration blocks in readline() until a full line arrives.
        while self._running:
            self._handle_events()
            self._get_temperature()
            self._redraw()
        pygame.quit()

    def _get_temperature(self) -> None:
        # One reading per serial line; assumes the device sends ASCII text.
        self.temp = self.s.readline().decode('ascii').strip()
        #print(self.temp)

    def _redraw(self) -> None:
        # Red text plus alarm above the threshold; white text otherwise.
        tt = float(self.temp)
        if tt > self.high_temp:
            self.alarm.play()
            text = self.font.render(self.temp, 1, (255, 0, 0))
        else:
            text = self.font.render(self.temp, 1, (255, 255, 255))
            self.alarm.stop()
        text_rect = text.get_rect(center=self.text_center)
        self._surface.blit(self.bg, (0,0))
        self._surface.blit(text, text_rect)
        #self._surface.fill(pygame.Color(41,36,33))
        pygame.display.update()

    def _handle_events(self) -> None:
        # ESC exits the display loop.
        for e in pygame.event.get():
            if e.type == pygame.KEYDOWN:
                if e.key == pygame.K_ESCAPE:
                    self._running = False
# Entry point.  Note: run() returns None, so ``sn`` is always None here.
if __name__ == '__main__':
    sn = Temp().run()
| [
"w3back@gmail.com"
] | w3back@gmail.com |
2f69b6682d0e02c84f16d9a1392c5798407925f3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_desisted.py | dc89e70ec3bbf682ce15215f48ef5b72a515f1d5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#calss header
class _DESISTED():
def __init__(self,):
self.name = "DESISTED"
self.definitions = desist
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['desist']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
11bec3b555107fc73d0e2a16654cd5dc9d63550a | 7e4460c85790fae2d470182732289bcd1b8777b2 | /Process/process_skins.py | 3a30a41c954eafa1f3fe1ae717d05245e99ec506 | [] | no_license | khamukkamu/swconquest-msys | 5b23654c8dd2e8b2f25bc7914252eedc05a5cc1e | 71337a4ae9c507b9440e84cf49d31fc67a781978 | refs/heads/master | 2021-04-29T19:00:10.389224 | 2019-05-01T15:11:11 | 2019-05-01T15:11:11 | 121,704,753 | 1 | 1 | null | 2018-02-16T01:40:58 | 2018-02-16T01:40:58 | null | UTF-8 | Python | false | false | 3,719 | py | import string
from process_common import *
from module_info import *
from module_skins import *
from module_info import wb_compile_switch as is_wb_skin
import string
from process__swyhelper import *
# WARNING: The following should be the same as the number in face_generator.h
num_voice_types = 2
#####################
def replace_spaces(s0):
    """Return *s0* with every space replaced by an underscore.

    Uses the ``str.replace`` method instead of the deprecated,
    Python 2-only ``string.replace`` module function (removed in
    Python 3); behavior is identical.
    """
    return s0.replace(" ", "_")
def write_face_tex(ofile, tex_set):
    """Serialize one face-texture set as a single space-separated line.

    Each entry is (texture_name, color, hair_materials[, hair_colors]);
    the trailing hair_colors list is optional and defaults to empty.
    """
    ofile.write(" %d " % len(tex_set))
    for entry in tex_set:
        name, color, hair_mats = entry[0], entry[1], entry[2]
        hair_colors = entry[3] if len(entry) > 3 else []
        ofile.write(" %s %d %d %d " % (name, color, len(hair_mats), len(hair_colors)))
        # Material names may contain spaces; the file format cannot.
        for mat in hair_mats:
            ofile.write(" %s " % replace_spaces(mat))
        for col in hair_colors:
            ofile.write(" %d " % col)
    ofile.write("\n")
def write_textures(ofile, tex_set):
    """Write a texture list as " <count>  <name>  <name> ..." plus a newline."""
    ofile.write(" %d " % len(tex_set))
    for tex_name in tex_set:
        ofile.write(" %s " % tex_name)
    ofile.write("\n")
def write_voices(ofile, voices):
    """Write voice records as " <count>  <id> <name> ..." plus a newline."""
    ofile.write(" %d " % len(voices))
    for rec in voices:
        # rec[0] is the numeric voice type, rec[1] its sound name.
        ofile.write(" %d %s " % (rec[0], rec[1]))
    ofile.write("\n")
def export_skins(skins):
  # Serialize the module_skins records into the engine's skins.txt format.
  # At most 30 skins are written (engine limit).  Python 2 only (xrange).
  ofile = open(export_dir + "skins.txt","w")
  ofile.write("skins_file version 1\n")
  ofile.write("%d\n"%len(skins))
  if len(skins) > 29:
    skins = skins[0:30]
  for skin in skins:
    if is_wb_skin:
      #swy-- convert tuple to list to make it writable
      skin = list(skin)
      #swy-- replace "_" placeholder mesh names with the engine dummy mesh
      for i, mesh in enumerate(skin):
        if type(mesh) is str and mesh == "_":
          skin[i] = "dummy_mesh"
    # Unpack the fixed positional fields of a skin record.
    skin_name = skin[0]
    skin_flags = skin[1]
    body_name = skin[2]
    calf_name = skin[3]
    hand_name = skin[4]
    head_mesh = skin[5]
    face_keys = skin[6]
    hair_meshes = skin[7]
    beard_meshes = skin[8]
    hair_textures = skin[9]
    beard_textures = skin[10]
    face_textures = skin[11]
    voices = skin[12]
    skeleton_name = skin[13]
    scale = skin[14]
    # Optional trailing fields: blood particles and sound-mix constraints.
    blood_particles_1 = 0
    blood_particles_2 = 0
    constraints = []
    if len(skin) > 15:
      blood_particles_1 = skin[15]
    if len(skin) > 16:
      blood_particles_2 = skin[16]
    if len(skin) > 17:
      constraints = skin[17]
    ofile.write("%s %d\n %s %s %s\n"%(skin_name, skin_flags, body_name, calf_name, hand_name))
    ofile.write(" %s %d "%(head_mesh,len(face_keys)))
    # Each face key: generated identifier, two ints and two fixed-point
    # floats (swytrailzro strips trailing zeroes), then the display name.
    for face_key in face_keys:
      ofile.write("skinkey_%s %d %d %s %s %s "%(convert_to_identifier(face_key[4]), face_key[0],face_key[1],swytrailzro(face_key[2]),swytrailzro(face_key[3]),replace_spaces(face_key[4])))
    ofile.write("\n%d\n"%len(hair_meshes))
    for mesh_name in hair_meshes:
      ofile.write(" %s "%mesh_name)
    ofile.write("\n %d\n"%len(beard_meshes))
    for bmn in beard_meshes:
      ofile.write(" %s\n"%bmn)
    ofile.write("\n")
    write_textures(ofile,hair_textures)
    write_textures(ofile,beard_textures)
    write_face_tex(ofile,face_textures)
    write_voices(ofile, voices)
    ofile.write(" %s %s "%(skeleton_name, swytrailzro(scale)))
    ofile.write("\n%d %d\n"%(blood_particles_1, blood_particles_2))
    ofile.write("%d\n"%(len(constraints)))
    # Constraint record: value, operator, then (coefficient, key) pairs
    # starting at index 2.
    for constraint in constraints:
      ofile.write("\n%s %d %d "%(swytrailzro(constraint[0]), constraint[1], (len(constraint) - 2)))
      for i_pair in xrange(len(constraint)):
        if i_pair > 1:
          ofile.write(" %s %d"%(swytrailzro(constraint[i_pair][0]), constraint[i_pair][1]))
      ofile.write("\n")
  ofile.close()
# Module-level side effect: skins.txt is written whenever this build script
# module is executed/imported by the module system.
print "Exporting skins..."
export_skins(skins)
| [
"swyterzone@gmail.com"
] | swyterzone@gmail.com |
417b7c78684e01503bc3ad7a901d8c6be1916817 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4379/codes/1649_2446.py | ef3d63fb97cf206c3f0439f899c69140cd7791fa | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | num=float(input("senha (6 digitos): "))
n1=num//100000
n2=(num//10000)%10
n3=(num//1000)%10
n4=(num//100)%10
n5=(num//10)%10
n6=num%10
if ((n2+n4+n6)%(n1+n3+n5)==0):
print("acesso liberado")
else:
print("senha invalida")
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
cccc38104fcdd3214cf991fab273dde1f1d0454d | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_088/ch35_2020_09_16_23_12_26_483631.py | 9df7bf2f9250ad03afdea874bfc0681b95417cdd | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | continua=True
soma=0
while(continua):
numero=int(input("digite o numero"))
soma+=numero
if(numero==0):
continua =False
print(soma) | [
"you@example.com"
] | you@example.com |
9aa5e52832cf35b3e5921015b4c55e33c4e5b7dd | b44a984ac8cfd183e218d56e1ec5d0d3e72d20fd | /High_Frequency/Dynamic Programming/Course Schedule IV/dfs+memo.py | de84bcc80eaf5dd410518179c7f1e887dc08076f | [] | no_license | atomextranova/leetcode-python | 61381949f2e78805dfdd0fb221f8497b94b7f12b | 5fce59e6b9c4079b49e2cfb2a6d2a61a0d729c56 | refs/heads/master | 2021-07-15T20:32:12.592607 | 2020-09-21T00:10:27 | 2020-09-21T00:10:27 | 207,622,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | class Solution:
"""
@param n: an integer, denote the number of courses
@param p: a list of prerequisite pairs
@return: return an integer,denote the number of topologicalsort
"""
def topologicalSortNumber(self, n, p):
post_to_pres = []
cur_list = [str(i) for i in range(n)]
memo = {}
for i in range(n):
post_to_pres.append(set())
for (pre, post) in p:
post_to_pres[post].add(pre)
# return self.dfs(post_to_pres, post_to_count, cur_list, memo)
result = self.dfs(post_to_pres, cur_list, memo)
return result
def dfs(self, post_to_pres, cur_list, memo):
if len(cur_list) == 0:
return 1
key = "".join(cur_list)
if key in memo:
return memo[key]
memo[key] = 0
for i in range(len(cur_list)):
cur_course = int(cur_list[i])
if len(post_to_pres[cur_course]) != 0:
continue
next_list = cur_list[:i] + cur_list[i+1:]
remove_list = []
for course in next_list:
course = int(course)
if cur_course in post_to_pres[course]:
post_to_pres[course].remove(cur_course)
remove_list.append(course)
memo[key] += self.dfs(post_to_pres, next_list, memo)
for course in remove_list:
post_to_pres[course].add(cur_course)
return memo[key] | [
"atomextranova@gmail.com"
] | atomextranova@gmail.com |
fd24ce8e6ab3237a43786af7a37372084eb42eb7 | de4aa86038fb75778b4e6d0e7bb07fc78cf28a0e | /__init__.py | ca2ddad9a898b8b1ae13ba3007b43eafaef53bfb | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | radaniba/jcvi | 718d8b87fc5a5e748841a37f5e100cfa2c232347 | 04d457ea7231897d547ea0bd51b011fe7412f171 | refs/heads/master | 2020-12-31T06:22:16.125846 | 2014-09-18T16:58:43 | 2014-09-18T16:58:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | __author__ = ("Haibao Tang", "Vivek Krishnakumar", "Jingping Li", "Maria Kim")
__copyright__ = "Copyright (c) 2010-2014, Haibao Tang"
__email__ = "tanghaibao@gmail.com"
__license__ = "BSD"
__status__ = "Development"
__version__ = "0.4.9"
| [
"tanghaibao@gmail.com"
] | tanghaibao@gmail.com |
c25e501bc52ff5180701dcea6403201d44cce97f | 747f759311d404af31c0f80029e88098193f6269 | /addons/analytic_multicurrency/__init__.py | 0e6e6997632a278ae3ef25b661e98b49709bee8e | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | /home/openerp/production/extra-addons/analytic_multicurrency/__init__.py | [
"geerish@omerp.net"
] | geerish@omerp.net |
66477801ae526dcb4fcbcda849c5be6f6bcbe983 | 46dbafdf6608f5c92b0a821a6f6e68c1de64c5bb | /knowledge/global_utils.py | be48a45db958bb2f295384e3bbdbd06e2cee203b | [] | no_license | lcwy220/knowledge_revised | b89db6dc6ec9a713e057930ee63405db4b284534 | d53f47ec27bd452a77ade2c0e2a03b8c3136b937 | refs/heads/master | 2021-01-22T17:52:39.539096 | 2017-03-18T06:27:46 | 2017-03-18T06:27:46 | 85,040,924 | 0 | 0 | null | 2017-03-15T07:13:37 | 2017-03-15T07:13:37 | null | UTF-8 | Python | false | false | 16,661 | py | # -*-coding:utf-8-*-
import redis
from py2neo import Graph
from elasticsearch import Elasticsearch
from global_config import *
# user profile info
es_user_profile = Elasticsearch(user_profile_host, timeout=600)
profile_index_name = "weibo_user"
profile_index_type = "user"
# user portrait system
es_user_portrait = Elasticsearch(user_portrait_host, timeout=600)
es_bci = Elasticsearch(user_profile_host, timeout=600)
# es_social_sensing
es_social_sensing_text = Elasticsearch(social_sensing_text, timeout=600)
sensing_compute_interval = 2*3600
#recommendation task
es_recommendation_result = Elasticsearch(user_portrait_host, timeout=600)
es_retweet = Elasticsearch(retweet_comment_es_host, timeout=600)
es_comment = Elasticsearch(retweet_comment_es_host, timeout = 600)
be_es_retweet = Elasticsearch(retweet_comment_es_host, timeout=600)
be_es_comment = Elasticsearch(retweet_comment_es_host, timeout = 600)
ES_CLUSTER_FLOW1 = Elasticsearch(bci_es_host, timeout = 600)
es_tag = Elasticsearch(user_portrait_host, timeout=600)
# flow text system
es_flow_text = Elasticsearch(flow_text_host, timeout=600)
# social sensing
es_social_sensing_text = Elasticsearch(social_sensing_text, timeout=600)
# km user portrait
es_km_user_portrait = Elasticsearch(km_user_portrait_host,timeout=600)
# km event
es_event = Elasticsearch(event_host, timeout=600)
# The process state is stored
es_calculate_status = Elasticsearch(calculate_status_host, timeout=600)
graph = Graph(neo4j_data_path, user=neo4j_name, password=neo4j_password)
r = redis.StrictRedis(host=redis_host, port=redis_port, db=0)
# user portrait interface: push user into redis list
r_user = redis.StrictRedis(host=redis_host, port=redis_port, db=10)
r_user_hash_name = 'user2portrait'
r_user_update_hash_name = 'user_update'
r_user_update_long_hash_name = 'user_update_long'
#jln event redis
topic_queue_name='EVENT_portrait_task'
flow_text_index_name_pre = 'flow_text_' # flow text: 'flow_text_2013-09-01'
flow_text_index_type = 'text'
portrait_index_name = 'user_portrait_0312' # user portrait
portrait_index_type = 'user'
# week retweet/be_retweet relation es
retweet_index_name_pre = '1225_retweet_' # retweet: 'retweet_1' or 'retweet_2'
retweet_index_type = 'user'
be_retweet_index_name_pre = '1225_be_retweet_' #be_retweet: 'be_retweet_1'/'be_retweet_2'
be_retweet_index_type = 'user'
# week comment/be_comment relation es
comment_index_name_pre = '1225_comment_'
comment_index_type = 'user'
be_comment_index_name_pre = '1225_be_comment_'
be_comment_index_type = 'user'
#es for bci history
bci_history_index_name = 'bci_history'
bci_history_index_type = 'bci'
#recommendation_user
recommendation_index_name = 'recommendation_in_user'
recommendation_index_type = 'user'
bci_day_pre = 'bci_'
bci_day_type = 'bci'
# es for tag
tag_index_name = 'custom_attribute'
tag_index_type = 'attribute'
def _default_es_cluster_flow1(host=ES_CLUSTER_HOST_FLOW1):
    """Create an Elasticsearch client for the flow1 cluster (60s timeout, up to 6 retries)."""
    return Elasticsearch(host, timeout=60, retry_on_timeout=True, max_retries=6)
# 存储user_portrait的重要度/活跃度/影响力和敏感度,与es_flow1一致
ES_COPY_USER_PORTRAIT = _default_es_cluster_flow1(host=ES_COPY_USER_PORTAIT_HOST)
COPY_USER_PORTRAIT_INFLUENCE = "copy_user_portrait_influence"
COPY_USER_PORTRAIT_INFLUENCE_TYPE = 'bci'
COPY_USER_PORTRAIT_IMPORTANCE = "copy_user_portrait_importance"
COPY_USER_PORTRAIT_IMPORTANCE_TYPE = 'importance'
COPY_USER_PORTRAIT_ACTIVENESS = "copy_user_portrait_activeness"
COPY_USER_PORTRAIT_ACTIVENESS_TYPE = 'activeness'
COPY_USER_PORTRAIT_SENSITIVE = "copy_user_portrait_sensitive"
COPY_USER_PORTRAIT_SENSITIVE_TYPE = 'sensitive'
#recommendation_in
ES_DAILY_RANK = _default_es_cluster_flow1(host=ES_COPY_USER_PORTAIT_HOST)
# es for activeness history, influence history and pagerank
#copy_portrait_index_name = 'user_portrait_1222'#'this_is_a_copy_user_portrait'
copy_portrait_index_name = 'this_is_a_copy_user_portrait'
copy_portrait_index_type = 'user'
#neo4j
graph = Graph(neo4j_data_path, user=neo4j_name, password=neo4j_password)
#neo4j查询事件名
# event2id
def event_name_to_id(en_name):
    """Look up an event by display name and return its ``en_name`` identifier.

    Takes the first search hit; the last field value iterated wins (only
    ``en_name`` is requested, so that is unambiguous here).
    """
    query_body = {"query": {"match": {'name': en_name}}}
    hit_fields = es_event.search(index=event_name, doc_type=event_type,
                                 body=query_body, fields=['en_name'])['hits']['hits'][0]['fields']
    for field_key, field_values in hit_fields.items():
        ch_name = field_values[0]
    return ch_name
# event_search_sth
def es_search_sth(en_name,fields_list):
    # Fetch one field value from the event-analysis index for the event
    # identified by ``en_name``.  Only the value of the last field iterated
    # is returned (dict order), so this is normally called with one field.
    # NOTE: Python 2 only (`print` statement, `.iteritems()`).
    print fields_list
    query_body = {
        "query":{
            "match":{
                'en_name':en_name
            }
        }
    }
    sth_results = es_event.search(index=event_analysis_name, doc_type=event_type, \
        body=query_body,fields=fields_list)['hits']['hits'][0]['fields']
    for k,v in sth_results.iteritems():
        sth_name = v[0]
    return sth_name
#es:事件id查找事件名
def event_name_search(en_name):
    """Look up an event by document id and return its ``name`` field value."""
    query_body = {"query": {"match": {'_id': en_name}}}
    hit_fields = es_event.search(index=event_name, doc_type=event_type,
                                 body=query_body, fields=['name'])['hits']['hits'][0]['fields']
    for field_key, field_values in hit_fields.items():
        ch_name = field_values[0]
    return ch_name
#查找uid对应的字段
def user_search_sth(en_name, fields_list):
    """Fetch the requested portrait fields for uid ``en_name``.

    Returns {field: first value}.  On any lookup failure every requested
    field maps to the empty string instead.
    """
    query_body = {"query": {"match": {'_id': en_name}}}
    try:
        hit_fields = es_user_portrait.search(index=portrait_name, doc_type=portrait_type,
                                             body=query_body, fields=fields_list)['hits']['hits'][0]['fields']
    except:  # NOTE(review): bare except kept for behavior parity -- it also hides real ES errors
        return {field: '' for field in fields_list}
    result = {}
    for field_key, field_values in hit_fields.items():
        result[field_key] = field_values[0]
    return result
#查找uid对应的名字
def user_name_search(en_name):
    """Return the ``uname`` of uid ``en_name``, or '' when the lookup fails."""
    query_body = {"query": {"match": {'_id': en_name}}}
    try:
        hit_fields = es_user_portrait.search(index=portrait_name, doc_type=portrait_type,
                                             body=query_body, fields=['uname'])['hits']['hits'][0]['fields']
    except:  # NOTE(review): bare except kept for behavior parity
        return ''
    for field_key, field_values in hit_fields.items():
        ch_name = field_values[0]
    return ch_name
#查找该专题下事件关联的用户信息,用户卡片
def related_user_search(uid_list, sort_flag):
    """Return user-card field dicts for ``uid_list``, sorted by ``sort_flag`` desc.

    At most 200 hits are returned; fields missing on a hit are filled with
    the string 'null'.
    """
    fields_list = ['activeness', 'influence', 'sensitive', 'uname', 'fansnum',
                   'domain', 'topic_string', 'user_tag', 'uid', 'photo_url',
                   'activity_geo_aggs', 'statusnum']
    query_body = {
        'query': {'terms': {'uid': uid_list}},
        'size': 200,
        "sort": [{sort_flag: 'desc'}],
    }
    hits = es_user_portrait.search(index=portrait_name, doc_type=portrait_type,
                                   body=query_body, _source=False,
                                   fields=fields_list)['hits']['hits']
    detail_result = []
    for hit in hits:
        hit_fields = hit['fields']
        detail = {}
        for field in fields_list:
            try:
                detail[field] = hit_fields[field][0]
            except:  # field absent on this hit
                detail[field] = 'null'
        detail_result.append(detail)
    return detail_result
# 查找该专题下的包含事件卡片信息,事件卡片
def event_detail_search(eid_list, sort_flag):
    """Return event-card field dicts for ``eid_list``, sorted by ``sort_flag`` desc.

    At most 100 hits are returned; fields missing on a hit are filled with
    the string 'null'.
    """
    fields_list = ['name', 'en_name', 'weibo_counts', 'start_ts', 'location',
                   'uid_counts', 'user_tag', 'description', 'photo_url']
    query_body = {
        'query': {'terms': {'en_name': eid_list}},
        'size': 100,
        "sort": [{sort_flag: 'desc'}],
    }
    hits = es_event.search(index=event_analysis_name, doc_type=event_type,
                           body=query_body, _source=False,
                           fields=fields_list)['hits']['hits']
    detail_result = []
    for hit in hits:
        hit_fields = hit['fields']
        detail = {}
        for field in fields_list:
            try:
                detail[field] = hit_fields[field][0]
            except:  # field absent on this hit
                detail[field] = 'null'
        detail_result.append(detail)
    return detail_result
#jln
R_CLUSTER_FLOW1 = redis.StrictRedis(host=REDIS_CLUSTER_HOST_FLOW1, port=REDIS_CLUSTER_PORT_FLOW1)
R_CLUSTER_FLOW2 = redis.StrictRedis(host=REDIS_CLUSTER_HOST_FLOW2, port=REDIS_CLUSTER_PORT_FLOW2)
######
R_CLUSTER_FLOW3 = redis.StrictRedis(host=REDIS_CLUSTER_HOST_FLOW2, port=REDIS_CLUSTER_PORT_FLOW2)
def _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=1):
    # Build a StrictRedis client; host/port defaults come from global_config.
    return redis.StrictRedis(host, port, db)
redis_flow_text_mid = _default_redis(host=REDIS_TEXT_MID_HOST, port=REDIS_TEXT_MID_PORT, db=2)
redis_host_list = ["1", "2"]
#use to save retweet/be_retweet
retweet_r_1 = _default_redis(host=RETWEET_REDIS_HOST,port=RETWEET_REDIS_PORT, db=1)
retweet_r_2 = _default_redis(host=RETWEET_REDIS_HOST, port=RETWEET_REDIS_PORT, db=2)
retweet_redis_dict = {'1':retweet_r_1, '2':retweet_r_2}
#use to save comment/be_comment
comment_r_1 = _default_redis(host=COMMENT_REDIS_HOST, port=COMMENT_REDIS_PORT, db=1)
comment_r_2 = _default_redis(host=COMMENT_REDIS_HOST, port=COMMENT_REDIS_PORT, db=2)
comment_redis_dict = {'1':comment_r_1, '2':comment_r_2}
#use to save network retweet/be_retweet
daily_retweet_redis = _default_redis(host=REDIS_CLUSTER_HOST_FLOW1,port=REDIS_CLUSTER_PORT_FLOW1,db=4)
R_0 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=0)
R_1 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=1)
R_2 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=2)
R_3 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=3)
R_4 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=4)
R_5 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=5)
R_6 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=6)
R_7 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=7)
R_8 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=8)
R_9 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=9)
R_10 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=10)
R_11 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=11)
#bci_history jln
R_12 = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=12)
R_DICT = {'0':R_0, '1':R_1, '2':R_2, '3':R_3, '4':R_4, '5':R_5, '6':R_6, '7':R_7,\
'8':R_8, '9':R_9, '10':R_10, '11':R_11, '12':R_12}
R_SENTIMENT_ALL = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=11)
#use to save user domain in user_portrait
R_DOMAIN = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=12)
r_domain_name = 'user_domain'
#use to save user topic in user_portrait
R_TOPIC = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=13)
r_topic_name = 'user_topic'
#use to save domain sentiment trend
R_DOMAIN_SENTIMENT = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=12)
r_domain_sentiment_pre = 'sentiment_domain_'
#use to save topic sentiment trend
R_TOPIC_SENTIMENT = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=13)
r_topic_sentiment_pre = 'sentiment_topic_'
#use to save sentiment keywords task information to redis queue
R_SENTIMENT_KEYWORDS = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=10)
r_sentiment_keywords_name = 'sentiment_keywords_task'
#use to save sentiment keywords task information to redis queue
R_NETWORK_KEYWORDS = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=10)
r_network_keywords_name = 'network_keywords_task'
# social sensing redis
R_SOCIAL_SENSING = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=14)
topic_queue_name = 'topics_task'
#jln add topic computing in db15
R_ADMIN = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=15)
#use to write portrait user list to redis as queue for update_day and update_week
update_day_redis = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=5)
UPDATE_DAY_REDIS_KEY = 'update_day'
update_week_redis = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=5)
UPDATE_WEEK_REDIS_KEY = 'update_week'
update_month_redis = _default_redis(host=REDIS_HOST, port=REDIS_PORT, db=5)
UPDATE_MONTH_REDIS_KEY = 'update_month'
#recommendation_in
R_RECOMMENTATION = _default_redis(host=REDIS_CLUSTER_HOST_FLOW1, port=REDIS_CLUSTER_PORT_FLOW1, db=1)
r_recommendation_in_now = 'recommendation_in_now'
r_recommendation_in_after = 'recommendation_in_after'
'''
# elasticsearch initialize, one for user_profile, one for user_portrait
# NOTE(review): three ES clusters are used (profile / portrait / flow-text);
# timeouts vary per workload (portrait bulk work gets 6000s).
es_user_profile = Elasticsearch(USER_PROFILE_ES_HOST, timeout = 600)
es_bci_history = Elasticsearch(USER_PROFILE_ES_HOST, timeout=600)
es_sensitive = Elasticsearch(USER_PROFILE_ES_HOST, timeout=600)
es_user_portrait = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 6000)
es_social_sensing = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_prediction = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_flow_text = Elasticsearch(FLOW_TEXT_ES_HOST, timeout=600)
es_group_result = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout=1000)
es_retweet = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_comment = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_be_comment = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_copy_portrait = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_tag = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout=600)
es_sentiment_task = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_network_task = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_rank_task = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout = 600)
es_operation = Elasticsearch(USER_PORTRAIT_ES_HOST, timeout=600)
# elasticsearch index_name and index_type
profile_index_name = 'weibo_user' # user profile es
profile_index_type = 'user'
portrait_index_name = 'user_portrait_1222' # user portrait
portrait_index_type = 'user'
flow_text_index_name_pre = 'flow_text_' # flow text: 'flow_text_2013-09-01'
flow_text_index_type = 'text'
# week retweet/be_retweet relation es
retweet_index_name_pre = '1225_retweet_' # retweet: 'retweet_1' or 'retweet_2'
retweet_index_type = 'user'
be_retweet_index_name_pre = '1225_be_retweet_' #be_retweet: 'be_retweet_1'/'be_retweet_2'
be_retweet_index_type = 'user'
# week comment/be_comment relation es
comment_index_name_pre = '1225_comment_'
comment_index_type = 'user'
be_comment_index_name_pre = '1225_be_comment_'
be_comment_index_type = 'user'
# es for activeness history, influence history and pagerank
#copy_portrait_index_name = 'user_portrait_1222'#'this_is_a_copy_user_portrait'
copy_portrait_index_name = 'this_is_a_copy_user_portrait'
copy_portrait_index_type = 'user'
# es for group detect and analysis
group_index_name = 'group_manage'
group_index_type = 'group'
# es for sentiment keywords task
sentiment_keywords_index_name = 'sentiment_keywords_task'
sentiment_keywords_index_type = 'sentiment'
# es for social sensing
sensing_index_name = 'manage_sensing_task'
sensing_doc_type = 'task'
#es for bci history
bci_history_index_name = 'bci_history'
bci_history_index_type = 'bci'
#es_sensitive
sensitive_index_name = 'sensitive_history'
sensitive_index_type = 'sensitive'
# Stores importance/activeness/influence/sensitivity of user_portrait;
# kept consistent with the es_flow1 cluster.
ES_COPY_USER_PORTRAIT = _default_es_cluster_flow1(host=ES_COPY_USER_PORTAIT_HOST)
COPY_USER_PORTRAIT_INFLUENCE = "copy_user_portrait_influence"
COPY_USER_PORTRAIT_INFLUENCE_TYPE = 'bci'
COPY_USER_PORTRAIT_IMPORTANCE = "copy_user_portrait_importance"
COPY_USER_PORTRAIT_IMPORTANCE_TYPE = 'importance'
COPY_USER_PORTRAIT_ACTIVENESS = "copy_user_portrait_activeness"
COPY_USER_PORTRAIT_ACTIVENESS_TYPE = 'activeness'
COPY_USER_PORTRAIT_SENSITIVE = "copy_user_portrait_sensitive"
COPY_USER_PORTRAIT_SENSITIVE_TYPE = 'sensitive'
'''
'''
jln:query_to_es
2016.8.8
'''
def getTopicByNameStEt(topic, start_date, end_date):
    """Return topic documents whose name, start_ts and end_ts all match exactly.

    Queries the topic index with a boolean AND of three term filters and
    returns the raw hit list (``hits.hits``) from Elasticsearch.
    """
    must_clauses = [
        {'term': {'start_ts': start_date}},
        {'term': {'end_ts': end_date}},
        {'term': {'name': topic}},
    ]
    query_body = {'query': {'bool': {'must': must_clauses}}}
    response = topic_es.search(
        index=topic_index_name,
        doc_type=topic_index_type,
        body=query_body,
    )
    return response['hits']['hits']
def getWeiboByNameStEt(topic, start_date, end_date):
    """Return weibo documents within [start_date, end_date] from index *topic*.

    Arg:    topic       - name of the Elasticsearch index to search
            start_date  - inclusive lower bound on the 'timestamp' field
            end_date    - inclusive upper bound on the 'timestamp' field
    Ret:    the full Elasticsearch response dict (hits plus metadata),
            unlike getTopicByNameStEt which returns only the hit list.
    """
    # Fix: use function-call print so the module also parses on Python 3;
    # for a single argument print(x) behaves identically on Python 2.
    print(weibo_es)  # debug output of the client in use
    query_body = {
        'query': {
            'filtered': {
                'filter': {
                    'range': {'timestamp': {'gte': start_date, 'lte': end_date}}
                }
            }
        }
    }
    search_result = weibo_es.search(index=topic, doc_type=weibo_index_type, body=query_body)
    print(search_result)  # debug output of the raw response
    return search_result
| [
"1257819385@qq.com"
] | 1257819385@qq.com |
fbec9c53646965e194c6b50aab16bc86dd1e842c | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/com_google_fhir/external/pypi__tensorboard_1_12_1/tensorboard/_vendor/bleach/callbacks.py | 2abfbc730279ddbb38613aacfa75e49d7aea49ac | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 150 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorboard_1_12_1/tensorboard/_vendor/bleach/callbacks.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
b116d249bb6b8d8d11168a3c5583bf091dcdc466 | 4e13248d569f3d2ba30519e45d7479d8764f84a2 | /lib/plot.py | c68d5a227c947ff42198e2c4592f12990837bf71 | [] | no_license | SunnerLi/Cup2 | 84df0b07df5875f20e0480b7032fe982bb8b4a79 | 8c8c8c0864a4f4b02f1496bb8e91970a04d5c6d0 | refs/heads/master | 2021-06-24T10:07:35.597057 | 2017-09-12T03:08:22 | 2017-09-12T03:08:22 | 93,631,639 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,753 | py | from collections import Counter, defaultdict
from config import *
import numpy as np
import time
import cv2
# Mapping object (Auto-generated): inverse of obj_name_2_index (index -> class name).
# Fix: dict.items() instead of Python-2-only iteritems(); identical behavior on
# Python 2 and also valid on Python 3.
obj_index_2_name = {index: name for name, index in obj_name_2_index.items()}

# Other variable (Auto-generated)
kind = len(obj_name_2_index)  # number of object classes
grid_height = None            # per-image grid cell height, filled in later
grid_width = None             # per-image grid cell width, filled in later

# 3x3 rectangular dilation kernel used before connected-component analysis
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
def binaryEdgeMapToRed(img):
    """Lift a single-channel edge map into a 3-channel image on the red channel.

    Arg:    img - 2-D edge image (e.g. Laplacian output)
    Ret:    float array of shape (H, W, 3); channel 2 (red in BGR order)
            holds the edge values, the other channels are zero.
    """
    height, width = np.shape(img)[0], np.shape(img)[1]
    red_image = np.zeros([height, width, 3])
    red_image[:, :, 2] = img
    return red_image
def coverEdge(origin_img, edge_img):
    """Overlay a binary edge map onto the original image in red.

    * Notice: the split uses bitwise masking, so the overlay is not
      distinguishable where the underlying pixel is already white.

    Arg:    origin_img - the original image
            edge_img   - the Laplacian edge image (single channel)
    Ret:    copy of origin_img with the edges drawn in red
    """
    composed = np.copy(origin_img)
    mask_inverted = cv2.bitwise_not(edge_img)
    background = cv2.bitwise_and(composed, composed, mask=mask_inverted)
    red_edges = binaryEdgeMapToRed(edge_img)
    foreground = cv2.bitwise_and(red_edges, red_edges, mask=edge_img)
    composed = cv2.add(background.astype(np.uint8), foreground.astype(np.uint8))
    return composed
def mergeSegmentAndScoringRes(img, result_segment, result_scoring):
    """
    Merge the segment and scoring result into the original image
    Arg:    img             - The original image
            result_segment  - The predict result after conducting the UNet
            result_scoring  - The predict result after conducting the scoring net
    Ret:    The image with merge result
    """
    # Copy image to prevent revised the original one
    res_img = np.copy(img)

    # Dilate the segmentation mask, then label its connected components
    result_segment = cv2.dilate(result_segment, kernel)
    result_segment = result_segment.astype(np.uint8)
    num_segment, label_map, component_info_list, centroids = cv2.connectedComponentsWithStats(
        result_segment, 4, cv2.CV_32S)

    # Generate grid variable and form the vector to the original shape
    # NOTE(review): '/' must be integer division here (grid cells are used as
    # pixel offsets), so this function assumes Python 2 semantics — confirm.
    grid_height = np.shape(img)[0] / grid_height_num
    grid_width = np.shape(img)[1] / grid_width_num
    scores = np.reshape(result_scoring, [kind, grid_height_num, grid_width_num])

    # Binary map marking grid cells where any class has a non-zero score
    has_response_map = np.zeros([grid_height_num, grid_width_num])
    for i in range(grid_height_num):
        for j in range(grid_width_num):
            for k in range(kind):
                if scores[k][i][j] != 0.0:
                    has_response_map[i][j] = 1
                    break

    # Create one score-accumulator vector per connected component
    # (the [[None]] placeholders are immediately replaced by zero vectors)
    component_bucket = [[None]] * np.max(label_map)
    for i in range(len(component_bucket)):
        component_bucket[i] = np.zeros(kind)

    # ----------------------------------------------------------------------------------
    # Collect score: for each responsive grid cell, credit the cell's top-class
    # score to the connected component that occupies most of the cell
    # ----------------------------------------------------------------------------------
    class_map = np.argmax(scores, axis=0)
    for i in range(grid_height_num):
        for j in range(grid_width_num):
            if has_response_map[i][j] == 1:
                # Determine grid point coordinate tuple
                grid_p1 = (j * grid_width, i * grid_height)
                grid_p2 = (j * grid_width + grid_width, i * grid_height + grid_height)

                # Count how many foreground pixels each component has in this cell
                mapping_componenet_2_freq = Counter()
                for k in range(grid_p1[1], grid_p2[1]):
                    for m in range(grid_p1[0], grid_p2[0]):
                        if result_segment[k][m] != 0:
                            if not label_map[k][m] in mapping_componenet_2_freq:
                                mapping_componenet_2_freq[label_map[k][m]] = 1
                            else:
                                mapping_componenet_2_freq[label_map[k][m]] += 1

                # Get the most frequent component in this cell
                freq_class = mapping_componenet_2_freq.most_common(1)
                if len(freq_class) != 0:
                    # NOTE(review): labels appear to be 1-based (0 = background),
                    # hence the -1 to index component_bucket — verify
                    freq_class = freq_class[0][0] - 1

                    # Add result into bucket
                    _score = scores[class_map[i][j]][i][j]
                    component_bucket[freq_class][class_map[i][j]] += _score

    # Voting: each component's class is the one with the highest accumulated score
    for i in range(len(component_bucket)):
        component_bucket[i] = np.argmax(component_bucket[i], axis=0)

    # ----------------------------------------------------------------------------------
    # Plot the result of segmentation (red contours of the mask)
    # ----------------------------------------------------------------------------------
    _, edge_graph = cv2.threshold(result_segment, 127, 255, cv2.THRESH_BINARY)
    _, contour, __ = cv2.findContours(edge_graph, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(res_img, contour, -1, (0, 0, 255), 1)

    # ----------------------------------------------------------------------------------
    # Ploting Bounding box and classification
    # (Select first 5th region)
    # ----------------------------------------------------------------------------------
    for i in range(num_segment - 1):
        # Bounding box corners, label anchor and centroid of component i+1
        bbox_p1 = (component_info_list[i + 1][cv2.CC_STAT_LEFT], component_info_list[i + 1][cv2.CC_STAT_TOP])
        bbox_p2 = (bbox_p1[0] + component_info_list[i + 1][cv2.CC_STAT_WIDTH],
                   bbox_p1[1] + component_info_list[i + 1][cv2.CC_STAT_HEIGHT])
        text_p = (int(round(0.5 * bbox_p1[0] + 0.5 * bbox_p2[0])), bbox_p1[1])
        cent_p = (text_p[0], int(round(0.5 * bbox_p1[1] + 0.5 * bbox_p2[1])))
        exam_extra_p1 = (bbox_p1[0], bbox_p2[1])
        exam_extra_p2 = (bbox_p2[0], bbox_p1[1])

        # Draw a colored, labeled box when any of the five probe points
        # (corners + center) falls in a responsive grid cell; otherwise a
        # dark unlabeled box
        if has_response_map[exam_extra_p1[1] / grid_height][exam_extra_p1[0] / grid_width] != 0 or \
            has_response_map[exam_extra_p2[1] / grid_height][exam_extra_p2[0] / grid_width] != 0 or \
            has_response_map[cent_p[1] / grid_height][cent_p[0] / grid_width] != 0 or \
            has_response_map[bbox_p1[1] / grid_height][bbox_p1[0] / grid_width] != 0 or \
            has_response_map[bbox_p2[1] / grid_height][bbox_p2[0] / grid_width] != 0:
            class_index = component_bucket[i]
            cv2.rectangle(res_img, bbox_p1, bbox_p2, obj_index_2_response_color_tuple[class_index], thickness=2)
            cv2.putText(res_img, obj_index_2_name[class_index], text_p, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
        else:
            cv2.rectangle(res_img, bbox_p1, bbox_p2, (0, 0, 50), thickness=2)
    return res_img
"a6214123@gmail.com"
] | a6214123@gmail.com |
4046b3d62d1ce23666ab43c8a41a59234f840fd4 | 2850d9adba96bc4e73185de5d6adebf363a5c534 | /tce/tcloud/cvm/RebootInstances.py | 6b68f7510fb20afa20eecb430276d6bf55bdddb3 | [
"Apache-2.0"
] | permissive | FatAnker/tencentcloud-sdk-python | d8f757b12ad336e78a06b68a789ecc3c86d1d331 | d6f75a41dc7053cb51f9091f4d41b8cb7a837559 | refs/heads/master | 2020-04-30T22:34:16.740484 | 2019-04-28T11:14:11 | 2019-04-28T11:14:11 | 177,122,691 | 0 | 1 | null | 2019-03-22T10:46:01 | 2019-03-22T10:46:01 | null | UTF-8 | Python | false | false | 2,131 | py | # -*- coding: utf-8 -*-
import os
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
# 导入对应产品模块的client models。
from tencentcloud.cvm.v20170312 import cvm_client, models
import json
# 导入可选配置类
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
import ssl
from tce.tcloud.utils.config import global_config
# NOTE(review): disables HTTPS certificate verification globally — acceptable
# only for trusted internal endpoints; confirm this is intentional.
ssl._create_default_https_context = ssl._create_unverified_context

# Load the per-region credentials/endpoint from the shared config.
region = global_config.get('regions')
params = global_config.get(region)
secretId = params['secretId']
secretKey = params['secretKey']
domain = params['domain']

try:
    # Build a credential object from the Tencent Cloud account secretId/secretKey.
    cred = credential.Credential(secretId, secretKey)
    httpProfile = HttpProfile()
    httpProfile.endpoint = "cvm."+domain

    clientProfile = ClientProfile()
    clientProfile.httpProfile = httpProfile

    # Instantiate the client for the requested product (CVM here);
    # clientProfile is optional.
    client = cvm_client.CvmClient(cred, region, clientProfile)

    # Build the request object; every API call has a matching request class.
    req = models.RebootInstancesRequest()

    # Request parameters may also be supplied as a standard JSON string
    # (equivalent to assigning the attributes one by one).
    params = '{"InstanceIds":["ins-i4ekkudx","ins-gwggvy39"]}'
    req.from_json_string(params)

    # Call RebootInstances through the client; the method name matches the
    # request object. The response is a RebootInstancesResponse instance.
    resp = client.RebootInstances(req)

    # Print the response as a JSON string.
    print(resp.to_json_string())

    # Individual fields can also be read from the response object; see the
    # API documentation for the field definitions.
    # print(resp.TotalCount)


except TencentCloudSDKException as err:
    print(err)
"1113452717@qq.com"
] | 1113452717@qq.com |
fefac9fb3ef1ecc86facfe365495ab0f28693881 | 87b006149b16a3028385fc58cf781f5a12c94ad9 | /PyFunceble/checker/syntax/second_lvl_domain.py | e01845520774e59a1dd0e96aa7033158eb539543 | [
"Apache-2.0"
] | permissive | spirillen/PyFunceble | 04d03b2678ad46ec81c520a32df5397832414451 | 3c8f62062bffa0e16d465c150a853af8bf2f2205 | refs/heads/master | 2023-05-12T04:32:04.587521 | 2022-11-20T11:19:06 | 2022-11-20T11:19:06 | 237,827,167 | 2 | 0 | Apache-2.0 | 2021-01-27T10:09:59 | 2020-02-02T19:50:47 | Python | UTF-8 | Python | false | false | 4,696 | py | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the second level domain syntax checker.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/latest/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2022 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
from PyFunceble.checker.syntax.domain_base import DomainSyntaxCheckerBase
from PyFunceble.helpers.regex import RegexHelper
class SecondLvlDomainSyntaxChecker(DomainSyntaxCheckerBase):
    """
    Provides an interface to check the syntax of a second domain.

    :param str subject:
        Optional, The subject to work with.
    """

    # pylint: disable=line-too-long
    REGEX_VALID_DOMAIN: str = r"^(?=.{0,253}$)(([a-z0-9][a-z0-9-]{0,61}[a-z0-9]|[a-z0-9])\.)+((?=.*[^0-9])([a-z0-9][a-z0-9-]{0,61}[a-z0-9](?:\.)?|[a-z0-9](?:\.)?))$"
    REGEX_VALID_RELAXED_DOMAIN: str = r"^(?=.{0,253}$)(([a-z0-9][a-z0-9_-]{0,61}[a-z0-9_-]|[a-z0-9])\.)+((?=.*[^0-9])([a-z0-9][a-z0-9-]{0,61}[a-z0-9](?:\.)?|[a-z0-9](?:\.)?))$"

    last_point_index: Optional[int] = None
    """
    Saves the index of the last point.
    """

    @DomainSyntaxCheckerBase.ensure_subject_is_given
    def is_valid(self) -> bool:
        """
        Validate the given subject.

        .. warning::
            A valid domain may also be a valid subdomain.
            If you precisely want to check a subdomain please refer to the
            right checker (not this one :-) )!
        """

        # pylint: disable=too-many-return-statements

        # get_extension() also fills self.last_point_index as a side effect.
        extension = self.get_extension()

        # Guard: the extension must exist and be either IANA-registered or a
        # special-use (RFC) extension.
        if not extension:
            return False
        if (
            extension not in self.iana_dataset
            and extension not in self.SPECIAL_USE_DOMAIN_NAMES_EXTENSIONS
        ):
            return False

        label_part = self.idna_subject[: self.last_point_index]
        without_suffix, _ = self.get_subject_without_suffix(
            self.idna_subject, extension
        )

        if without_suffix:
            # A second-level domain under a public suffix must be a single label.
            if "." in without_suffix:
                return False

            return RegexHelper(self.REGEX_VALID_DOMAIN).match(
                self.idna_subject, return_match=False
            ) or RegexHelper(self.REGEX_VALID_RELAXED_DOMAIN).match(
                self.idna_subject, return_match=False
            )

        # No public suffix: everything before the extension must be one label.
        if "." in label_part:
            return False

        return RegexHelper(self.REGEX_VALID_DOMAIN).match(
            self.idna_subject, return_match=False
        )
| [
"contact@funilrys.com"
] | contact@funilrys.com |
9634edfec32f0f9cfa846e90a764fa87057766cf | a20f9643cc79d2ce4fe69176b4439ce5855fdab4 | /backend/songbird_18676/urls.py | db249b68d533e4dd80a645b2e807acbfe92cbca3 | [] | no_license | crowdbotics-apps/songbird-18676 | bde32b1206d3f045403e2ef9e609254e33761994 | 09490a0d9438df7219164cf0c84437a69596c5a6 | refs/heads/master | 2022-11-14T19:46:35.980627 | 2020-07-07T20:58:39 | 2020-07-07T20:58:39 | 277,920,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | """songbird_18676 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "Songbird"
admin.site.site_title = "Songbird Admin Portal"
admin.site.index_title = "Songbird Admin"

# Swagger/OpenAPI schema view; served only to authenticated users.
schema_view = get_schema_view(
    openapi.Info(
        title="Songbird API",
        default_version="v1",
        description="API documentation for Songbird App",
    ),
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)

urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
99ac83b99ddb015a8af7fe01b944afb27dfa0dd2 | 84226827016bf833e843ebce91d856e74963e3ed | /tests/integration/modules/boto_sns.py | 8bff3a65f280a9b4b69437e07f0a875772f21f3b | [
"Apache-2.0"
] | permissive | jbq/pkg-salt | ad31610bf1868ebd5deae8f4b7cd6e69090f84e0 | b6742e03cbbfb82f4ce7db2e21a3ff31b270cdb3 | refs/heads/master | 2021-01-10T08:55:33.946693 | 2015-05-21T13:41:01 | 2015-05-21T13:41:01 | 36,014,487 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | # -*- coding: utf-8 -*-
'''
Validate the boto_sns module
'''
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
import integration
# Probe for the boto library and for working AWS credentials; the two flags
# drive the skipIf decorators on the test case below.
NO_BOTO_MODULE = True
BOTO_NOT_CONFIGURED = True
try:
    import boto
    NO_BOTO_MODULE = False
    try:
        # connect_iam raises NoAuthHandlerFound when no credentials are set up.
        boto.connect_iam()
        BOTO_NOT_CONFIGURED = False
    except boto.exception.NoAuthHandlerFound:
        pass
except ImportError:
    pass
@skipIf(
    NO_BOTO_MODULE,
    'Please install the boto library before running boto integration tests.'
)
@skipIf(
    BOTO_NOT_CONFIGURED,
    'Please setup boto AWS credentials before running boto integration tests.'
)
class BotoSNSTest(integration.ModuleCase):
    """Integration tests for the boto_sns execution module."""

    def test_exists(self):
        """A topic that was never created must not exist."""
        self.assertFalse(self.run_function('boto_sns.exists', ['nonexistent']))

    def test_create(self):
        """Creating a topic reports success."""
        self.assertTrue(self.run_function('boto_sns.create', ['my-test-topic']))

    def test_delete(self):
        """Deleting a topic reports success."""
        self.assertTrue(self.run_function('boto_sns.delete', ['my-test-topic']))

    def test_get_all_topics(self):
        """Every created topic is listed, keyed by name with its ARN as value."""
        self.run_function('boto_sns.create', ['my-test-topic'])
        self.run_function('boto_sns.create', ['my-second-test-topic'])

        listed = self.run_function('boto_sns.get_all_topics')

        for topic_name in ('my-test-topic', 'my-second-test-topic'):
            self.assertIn(topic_name, listed.keys())
            self.assertIn(self._get_arn(topic_name), listed.values())

    def _get_arn(self, name):
        """Build the expected us-east-1 SNS ARN for *name*."""
        return 'arn:aws:sns:us-east-1:{0}:{1}'.format(self.account_id, name)

    @property
    def account_id(self):
        """Lazily fetch and cache the AWS account id on the instance."""
        try:
            return self._account_id
        except AttributeError:
            self._account_id = self.run_function('boto_iam.get_account_id')
            return self._account_id
"joehealy@gmail.com"
] | joehealy@gmail.com |
2ae43110afb9d1103a7ace2557660dbaa671a972 | 58df224689ab08c99359b1a6077d2fba3728dc61 | /lamda-ocr/merge-files/borb/pdf/canvas/event/begin_text_event.py | edadfc51f9880306c13f85e943a3463f2f3669f3 | [] | no_license | LIT-Midas/LITHackathon | 2b286728c156d79d3f426f6d19b160a2a04690db | 7b990483dd48b91cf3ec3452b78ab67770da71af | refs/heads/main | 2023-08-13T05:22:59.373965 | 2021-08-16T01:09:49 | 2021-08-16T01:09:49 | 395,024,729 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This implementation of Event is triggered when an BT instruction is being processed.
"""
from borb.pdf.canvas.event.event_listener import Event
class BeginTextEvent(Event):
"""
This implementation of Event is triggered when an BT instruction is being processed.
"""
pass
| [
"trevordino@gmail.com"
] | trevordino@gmail.com |
5feb3012f0d839b4153bd825a19ad07f4b45e4f4 | 36b75aac4236e928e22552e8812abd45d32aecf1 | /modules/dbnd/src/dbnd/_core/task_ctrl/task_dag_describe.py | a9c956e79acc37da5af6f130546fa1f05224cdb3 | [
"Apache-2.0"
] | permissive | reloadbrain/dbnd | 7793aa1864f678005de626068b0ac9361d637d65 | ec0076f9a142b20e2f7afd886ed1a18683c553ec | refs/heads/master | 2023-09-01T08:04:09.486666 | 2021-10-14T16:43:00 | 2021-10-14T16:43:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,355 | py | from __future__ import print_function
import logging
from dbnd._core.constants import DescribeFormat
from dbnd._core.errors import DatabandSystemError
from dbnd._core.settings import DescribeConfig
from dbnd._core.task_ctrl.task_ctrl import TaskSubCtrl
from dbnd._core.utils.basics.helpers import indent
from dbnd._vendor.termcolor import colored
from dbnd.tasks import DataSourceTask
logger = logging.getLogger(__name__)
def tasks_trail(tasks):
    """Render a chain of tasks as 'id1 -> id2 -> ...' using their task_id."""
    return " -> ".join(task.task_id for task in tasks)
class DescribeDagCtrl(TaskSubCtrl):
    """Task sub-controller that renders a task DAG as an ascii tree or a flat list."""

    def __init__(self, task, describe_format=DescribeFormat.long, complete_status=None):
        super(DescribeDagCtrl, self).__init__(task)
        self.describe_format = describe_format

        # dummy implementation of complete cache (task_id -> bool/None)
        self._complete_status = complete_status or {}

    @property
    def config(self):
        # Shortcut to the DescribeConfig section of the settings.
        return self.settings.describe

    def tree_view(self, describe_format=None):
        """
        Shows an ascii tree representation of the DAG
        """
        # TODO: change to treelib implementation
        seen = set()

        def get_downstream(task, level=0):
            # Returns a list of (indent_level, description) tuples; a task
            # already printed is shown once more with a "(*)" marker and
            # not expanded again.
            task_desc = self._describe_task(task, describe_format=describe_format)
            if task in seen:
                return [(level, "%s (*)" % task_desc)]
            result = [(level, task_desc)]
            seen.add(task)

            level += 1
            count = 0
            task_dag = task.ctrl.task_dag
            for t in task_dag.upstream:
                count += 1
                # DataSourceTask leaves are skipped to keep the tree readable.
                if isinstance(t, DataSourceTask):
                    continue
                # Cap the fan-out at 30 children per node.
                if count > 30:
                    result.append((level, "..(%s tasks).." % len(task_dag.upstream)))
                    break
                result.extend(get_downstream(t, level))
            return result

        result = get_downstream(self.task)
        messages = [indent(msg, "\t" * level) for level, msg in result]
        logger.info(
            "Tasks Graph - (*) represent existing node in the graph run "
            "(green is completed, yellow is going to be submitted):\n%s",
            "\n".join(messages),
        )

    def list_view(self):
        # Flat dump: one banner per task of the sub-DAG.
        logger.info("List View of the DAG:\n")
        for t in self.task_dag.subdag_tasks():
            logger.info("%s\n" % self._describe_task(t))

    def _get_task_complete(self, task):
        # Cached completeness lookup; a failing _complete() is logged and
        # recorded as None rather than propagated.
        if task.task_id not in self._complete_status:
            try:
                complete = task._complete()
            except Exception as ex:
                logger.warning(
                    "Failed to get complete status for %s: %s", task.task_id, ex
                )
                complete = None
            self._complete_status[task.task_id] = complete
        return self._complete_status[task.task_id]

    def _describe_task(self, task, describe_format=None, msg=None, color=None):
        # Render one task either as a colored id (short) or as a full banner
        # (long/verbose). Green = complete, cyan = pending, white = unchecked.
        describe_format = describe_format or self.describe_format
        describe_config = self.config  # type: DescribeConfig

        msg = msg or ""
        if color is None:
            color = "white"
            if not describe_config.no_checks:
                color = "green" if self._get_task_complete(task) else "cyan"

        if describe_format == DescribeFormat.short:
            return colored(str(task.task_id), color)

        if (
            describe_format == DescribeFormat.long
            or describe_format == DescribeFormat.verbose
        ):
            title = "%s - %s" % (task.task_name, task.task_id)
            if task.task_name != task.get_task_family():
                title += "(%s)" % task.get_task_family()
            if msg:
                title += ": %s" % msg
            return task.ctrl.visualiser.banner(
                title, color=color, verbose=describe_format == DescribeFormat.verbose
            )

        raise DatabandSystemError("Not supported format mode %s" % self.describe_format)

    def describe_dag(self):
        # print short tree, then the flat list view
        self.tree_view(describe_format=DescribeFormat.short)
        self.list_view()

    def describe(self, as_tree=False):
        # Entry point: tree or list rendering depending on as_tree.
        if as_tree:
            from dbnd._core.constants import DescribeFormat

            self.tree_view(describe_format=DescribeFormat.short)
        else:
            self.ctrl.describe_dag.list_view()
| [
"roman.slipchenko@databand.ai"
] | roman.slipchenko@databand.ai |
95565b6038b246017520d2f8a8fd5549545a9860 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/account_helpers/__init__.py | c17ac8288f39b7019429fc00e8831b5f8f10f203 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,047 | py | # 2017.05.04 15:20:20 Střední Evropa (letní čas)
# Embedded file name: scripts/client/account_helpers/__init__.py
import datetime
import BigWorld
from constants import ACCOUNT_ATTR
from account_helpers.AccountSettings import AccountSettings, GOLD_FISH_LAST_SHOW_TIME
from shared_utils.account_helpers import BattleResultsCache, ClientClubs
from shared_utils.account_helpers import ClientInvitations
from helpers.time_utils import getCurrentTimestamp
def __checkAccountAttr(attrs, attrID):
    """Return True when *attrs* is set and contains the bit flag *attrID*."""
    if attrs is None:
        return False
    return attrs & attrID != 0
def isPremiumAccount(attrs):
    """Return True if the PREMIUM account attribute flag is set in *attrs*."""
    return __checkAccountAttr(attrs, ACCOUNT_ATTR.PREMIUM)
def isMoneyTransfer(attrs):
    """Return True if the TRADING account attribute flag is set in *attrs*."""
    return __checkAccountAttr(attrs, ACCOUNT_ATTR.TRADING)
def isDemonstrator(attrs):
    """Return True if the ARENA_CHANGE account attribute flag is set in *attrs*."""
    return __checkAccountAttr(attrs, ACCOUNT_ATTR.ARENA_CHANGE)
def isRoamingEnabled(attrs):
    """Return True if the ROAMING account attribute flag is set in *attrs*."""
    return __checkAccountAttr(attrs, ACCOUNT_ATTR.ROAMING)
def isOutOfWallet(attrs):
    """Return True if the OUT_OF_SESSION_WALLET account attribute flag is set."""
    return __checkAccountAttr(attrs, ACCOUNT_ATTR.OUT_OF_SESSION_WALLET)
def isClanEnabled(attrs):
    """Return True if the CLAN account attribute flag is set in *attrs*."""
    return __checkAccountAttr(attrs, ACCOUNT_ATTR.CLAN)
def getPremiumExpiryDelta(expiryTime):
    """Return the remaining premium time as a timedelta (expiry minus now, UTC).

    The result is negative when *expiryTime* (a POSIX timestamp) lies in the past.
    """
    expiry = datetime.datetime.utcfromtimestamp(expiryTime)
    return expiry - datetime.datetime.utcnow()
def convertGold(gold):
    """Identity passthrough — presumably a hook for currency conversion; confirm."""
    return gold
def getPlayerID():
    """Return the current player's id, or 0 when no player entity exists."""
    return getattr(BigWorld.player(), 'id', 0)
def getAccountDatabaseID():
    """Return the current account's database id, or 0 when unavailable."""
    return getattr(BigWorld.player(), 'databaseID', 0)
def isLongDisconnectedFromCenter():
    """Return the player's long-disconnect flag, defaulting to False."""
    return getattr(BigWorld.player(), 'isLongDisconnectedFromCenter', False)
def getAccountHelpersConfig(manager):
    """ Configures services for package gui.
    :param manager: helpers.dependency.DependencyManager.
    """
    # Local import — presumably avoids an import cycle at module load; confirm.
    from account_helpers import settings_core
    manager.install(settings_core.getSettingsCoreConfig)
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\account_helpers\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:20:20 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
ca486408ec1dad11aa30a35e2c012cbbef64c774 | aaa762ce46fa0347cdff67464f56678ea932066d | /AppServer/lib/django-1.2/tests/regressiontests/forms/localflavor/id.py | cb346ef7213c8b9f9b3ac7246e6189dda91db151 | [
"Apache-2.0",
"BSD-3-Clause",
"LGPL-2.1-or-later",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1"
] | permissive | obino/appscale | 3c8a9d8b45a6c889f7f44ef307a627c9a79794f8 | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | refs/heads/master | 2022-10-01T05:23:00.836840 | 2019-10-15T18:19:38 | 2019-10-15T18:19:38 | 16,622,826 | 1 | 0 | Apache-2.0 | 2022-09-23T22:56:17 | 2014-02-07T18:04:12 | Python | UTF-8 | Python | false | false | 6,972 | py | from django.contrib.localflavor.id.forms import (IDPhoneNumberField,
IDPostCodeField, IDNationalIdentityNumberField, IDLicensePlateField,
IDProvinceSelect, IDLicensePlatePrefixSelect)
from utils import LocalFlavorTestCase
class IDLocalFlavorTests(LocalFlavorTestCase):
    """Tests for the Indonesian (id) localflavor form fields and widgets."""

    def test_IDProvinceSelect(self):
        """Rendering the province select marks the given value as selected."""
        f = IDProvinceSelect()
        out = u'''<select name="provinces">
<option value="BLI">Bali</option>
<option value="BTN">Banten</option>
<option value="BKL">Bengkulu</option>
<option value="DIY">Yogyakarta</option>
<option value="JKT">Jakarta</option>
<option value="GOR">Gorontalo</option>
<option value="JMB">Jambi</option>
<option value="JBR">Jawa Barat</option>
<option value="JTG">Jawa Tengah</option>
<option value="JTM">Jawa Timur</option>
<option value="KBR">Kalimantan Barat</option>
<option value="KSL">Kalimantan Selatan</option>
<option value="KTG">Kalimantan Tengah</option>
<option value="KTM">Kalimantan Timur</option>
<option value="BBL">Kepulauan Bangka-Belitung</option>
<option value="KRI">Kepulauan Riau</option>
<option value="LPG" selected="selected">Lampung</option>
<option value="MLK">Maluku</option>
<option value="MUT">Maluku Utara</option>
<option value="NAD">Nanggroe Aceh Darussalam</option>
<option value="NTB">Nusa Tenggara Barat</option>
<option value="NTT">Nusa Tenggara Timur</option>
<option value="PPA">Papua</option>
<option value="PPB">Papua Barat</option>
<option value="RIU">Riau</option>
<option value="SLB">Sulawesi Barat</option>
<option value="SLS">Sulawesi Selatan</option>
<option value="SLT">Sulawesi Tengah</option>
<option value="SLR">Sulawesi Tenggara</option>
<option value="SLU">Sulawesi Utara</option>
<option value="SMB">Sumatera Barat</option>
<option value="SMS">Sumatera Selatan</option>
<option value="SMU">Sumatera Utara</option>
</select>'''
        self.assertEqual(f.render('provinces', 'LPG'), out)

    def test_IDLicensePlatePrefixSelect(self):
        """Rendering the license-plate prefix select marks the given value."""
        f = IDLicensePlatePrefixSelect()
        out = u'''<select name="codes">
<option value="A">Banten</option>
<option value="AA">Magelang</option>
<option value="AB">Yogyakarta</option>
<option value="AD">Surakarta - Solo</option>
<option value="AE">Madiun</option>
<option value="AG">Kediri</option>
<option value="B">Jakarta</option>
<option value="BA">Sumatera Barat</option>
<option value="BB">Tapanuli</option>
<option value="BD">Bengkulu</option>
<option value="BE" selected="selected">Lampung</option>
<option value="BG">Sumatera Selatan</option>
<option value="BH">Jambi</option>
<option value="BK">Sumatera Utara</option>
<option value="BL">Nanggroe Aceh Darussalam</option>
<option value="BM">Riau</option>
<option value="BN">Kepulauan Bangka Belitung</option>
<option value="BP">Kepulauan Riau</option>
<option value="CC">Corps Consulate</option>
<option value="CD">Corps Diplomatic</option>
<option value="D">Bandung</option>
<option value="DA">Kalimantan Selatan</option>
<option value="DB">Sulawesi Utara Daratan</option>
<option value="DC">Sulawesi Barat</option>
<option value="DD">Sulawesi Selatan</option>
<option value="DE">Maluku</option>
<option value="DG">Maluku Utara</option>
<option value="DH">NTT - Timor</option>
<option value="DK">Bali</option>
<option value="DL">Sulawesi Utara Kepulauan</option>
<option value="DM">Gorontalo</option>
<option value="DN">Sulawesi Tengah</option>
<option value="DR">NTB - Lombok</option>
<option value="DS">Papua dan Papua Barat</option>
<option value="DT">Sulawesi Tenggara</option>
<option value="E">Cirebon</option>
<option value="EA">NTB - Sumbawa</option>
<option value="EB">NTT - Flores</option>
<option value="ED">NTT - Sumba</option>
<option value="F">Bogor</option>
<option value="G">Pekalongan</option>
<option value="H">Semarang</option>
<option value="K">Pati</option>
<option value="KB">Kalimantan Barat</option>
<option value="KH">Kalimantan Tengah</option>
<option value="KT">Kalimantan Timur</option>
<option value="L">Surabaya</option>
<option value="M">Madura</option>
<option value="N">Malang</option>
<option value="P">Jember</option>
<option value="R">Banyumas</option>
<option value="RI">Federal Government</option>
<option value="S">Bojonegoro</option>
<option value="T">Purwakarta</option>
<option value="W">Sidoarjo</option>
<option value="Z">Garut</option>
</select>'''
        self.assertEqual(f.render('codes', 'BE'), out)

    def test_IDPhoneNumberField(self):
        """Valid phone formats normalize to themselves; malformed ones error."""
        error_invalid = [u'Enter a valid phone number']
        valid = {
            '0812-3456789': u'0812-3456789',
            '081234567890': u'081234567890',
            '021 345 6789': u'021 345 6789',
            '0213456789': u'0213456789',
            '+62-21-3456789': u'+62-21-3456789',
            '(021) 345 6789': u'(021) 345 6789',
        }
        invalid = {
            '0123456789': error_invalid,
            '+62-021-3456789': error_invalid,
            '+62-021-3456789': error_invalid,
            '+62-0812-3456789': error_invalid,
            '0812345678901': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDPhoneNumberField, valid, invalid)

    def test_IDPostCodeField(self):
        """Post codes are 5 digits from a valid range; whitespace is stripped."""
        error_invalid = [u'Enter a valid post code']
        valid = {
            '12340': u'12340',
            '25412': u'25412',
            ' 12340 ': u'12340',
        }
        invalid = {
            '12 3 4 0': error_invalid,
            '12345': error_invalid,
            '10100': error_invalid,
            '123456': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDPostCodeField, valid, invalid)

    def test_IDNationalIdentityNumberField(self):
        """NIK/KTP numbers normalize to the dotted form; bad dates/zeros error."""
        error_invalid = [u'Enter a valid NIK/KTP number']
        valid = {
            ' 12.3456.010178 3456 ': u'12.3456.010178.3456',
            '1234560101783456': u'12.3456.010178.3456',
            '12.3456.010101.3456': u'12.3456.010101.3456',
        }
        invalid = {
            '12.3456.310278.3456': error_invalid,
            '00.0000.010101.0000': error_invalid,
            '1234567890123456': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDNationalIdentityNumberField, valid, invalid)

    def test_IDLicensePlateField(self):
        """License plates normalize to upper case; invalid prefixes/codes error."""
        error_invalid = [u'Enter a valid vehicle license plate number']
        valid = {
            ' b 1234  ab ': u'B 1234 AB',
            'B 1234 ABC': u'B 1234 ABC',
            'A 12': u'A 12',
            'DK 12345 12': u'DK 12345 12',
            'RI 10': u'RI 10',
            'CD 12 12': u'CD 12 12',
        }
        invalid = {
            'CD 10 12': error_invalid,
            'CD 1234 12': error_invalid,
            'RI 10 AB': error_invalid,
            'B 12345 01': error_invalid,
            'N 1234 12': error_invalid,
            'A 12 XYZ': error_invalid,
            'Q 1234 AB': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDLicensePlateField, valid, invalid)
"root@lucid64.hsd1.ca.comcast.net"
] | root@lucid64.hsd1.ca.comcast.net |
3cb085700726c9c2db2df74c836ba5909727f8b0 | fdb9bdc6c4ab2f14ba71e544493706d5e275899f | /fhir/resources/parameters.py | 9a9ccb1bbdb7e5fb404d489eef37604a6f157d26 | [
"BSD-3-Clause"
] | permissive | nazrulworld/fhir.resources | 6ae8aea8180c611b0c5050759c6dcdf63e4cb061 | 1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3 | refs/heads/main | 2023-08-30T18:27:27.277249 | 2023-07-03T19:57:06 | 2023-07-03T19:57:06 | 165,297,877 | 256 | 83 | NOASSERTION | 2023-08-24T15:34:05 | 2019-01-11T19:26:41 | Python | UTF-8 | Python | false | false | 37,058 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Parameters
Release: R5
Version: 5.0.0
Build ID: 2aecd53
Last updated: 2023-03-26T15:21:02.749+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, fhirtypes, resource
class Parameters(resource.Resource):
    """Operation Request or Response.

    This resource is used to pass information into and back from an operation
    (whether invoked directly from REST or within a messaging environment). It
    is not persisted or allowed to be referenced by other resources.

    Disclaimer: any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; it exists only to enable the Extensibility
    feature for FHIR primitive data types.
    """
    resource_type = Field("Parameters", const=True)
    # The operation's parameter list; each entry is a ParametersParameter.
    parameter: typing.List[fhirtypes.ParametersParameterType] = Field(
        None,
        alias="parameter",
        title="Operation Parameter",
        description="A parameter passed to or received from the operation.",
        # if property is element of this resource.
        element_property=True,
    )
    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``Parameters`` in the
        specification-defined sequence order.
        """
        return ["id", "meta", "implicitRules", "language", "parameter"]
class ParametersParameter(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Operation Parameter.
A parameter passed to or received from the operation.
"""
resource_type = Field("ParametersParameter", const=True)
name: fhirtypes.String = Field(
None,
alias="name",
title="Name from the definition",
description="The name of the parameter (reference to the operation definition).",
# if property is element of this resource.
element_property=True,
element_required=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
part: typing.List[fhirtypes.ParametersParameterType] = Field(
None,
alias="part",
title="Named part of a multi-part parameter",
description="A named part of a multi-part parameter.",
# if property is element of this resource.
element_property=True,
)
resource: fhirtypes.ResourceType = Field(
None,
alias="resource",
title="If parameter is a whole resource",
description="Conveys the content if the parameter is a whole resource.",
# if property is element of this resource.
element_property=True,
)
valueAddress: fhirtypes.AddressType = Field(
None,
alias="valueAddress",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueAge: fhirtypes.AgeType = Field(
None,
alias="valueAge",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueAnnotation: fhirtypes.AnnotationType = Field(
None,
alias="valueAnnotation",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueAttachment: fhirtypes.AttachmentType = Field(
None,
alias="valueAttachment",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueAvailability: fhirtypes.AvailabilityType = Field(
None,
alias="valueAvailability",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueBase64Binary: fhirtypes.Base64Binary = Field(
None,
alias="valueBase64Binary",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueBase64Binary__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valueBase64Binary",
title="Extension field for ``valueBase64Binary``.",
)
valueBoolean: bool = Field(
None,
alias="valueBoolean",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueBoolean__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueBoolean", title="Extension field for ``valueBoolean``."
)
valueCanonical: fhirtypes.Canonical = Field(
None,
alias="valueCanonical",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueCanonical__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueCanonical", title="Extension field for ``valueCanonical``."
)
valueCode: fhirtypes.Code = Field(
None,
alias="valueCode",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueCode__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueCode", title="Extension field for ``valueCode``."
)
valueCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="valueCodeableConcept",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueCodeableReference: fhirtypes.CodeableReferenceType = Field(
None,
alias="valueCodeableReference",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueCoding: fhirtypes.CodingType = Field(
None,
alias="valueCoding",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueContactDetail: fhirtypes.ContactDetailType = Field(
None,
alias="valueContactDetail",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueContactPoint: fhirtypes.ContactPointType = Field(
None,
alias="valueContactPoint",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueCount: fhirtypes.CountType = Field(
None,
alias="valueCount",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueDataRequirement: fhirtypes.DataRequirementType = Field(
None,
alias="valueDataRequirement",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueDate: fhirtypes.Date = Field(
None,
alias="valueDate",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDate", title="Extension field for ``valueDate``."
)
valueDateTime: fhirtypes.DateTime = Field(
None,
alias="valueDateTime",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueDateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDateTime", title="Extension field for ``valueDateTime``."
)
valueDecimal: fhirtypes.Decimal = Field(
None,
alias="valueDecimal",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueDecimal__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueDecimal", title="Extension field for ``valueDecimal``."
)
valueDistance: fhirtypes.DistanceType = Field(
None,
alias="valueDistance",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueDosage: fhirtypes.DosageType = Field(
None,
alias="valueDosage",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueDuration: fhirtypes.DurationType = Field(
None,
alias="valueDuration",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueExpression: fhirtypes.ExpressionType = Field(
None,
alias="valueExpression",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueExtendedContactDetail: fhirtypes.ExtendedContactDetailType = Field(
None,
alias="valueExtendedContactDetail",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueHumanName: fhirtypes.HumanNameType = Field(
None,
alias="valueHumanName",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueId: fhirtypes.Id = Field(
None,
alias="valueId",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueId__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueId", title="Extension field for ``valueId``."
)
valueIdentifier: fhirtypes.IdentifierType = Field(
None,
alias="valueIdentifier",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueInstant: fhirtypes.Instant = Field(
None,
alias="valueInstant",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueInstant__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueInstant", title="Extension field for ``valueInstant``."
)
valueInteger: fhirtypes.Integer = Field(
None,
alias="valueInteger",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueInteger__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueInteger", title="Extension field for ``valueInteger``."
)
valueInteger64: fhirtypes.Integer64 = Field(
None,
alias="valueInteger64",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueInteger64__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueInteger64", title="Extension field for ``valueInteger64``."
)
valueMarkdown: fhirtypes.Markdown = Field(
None,
alias="valueMarkdown",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueMarkdown__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueMarkdown", title="Extension field for ``valueMarkdown``."
)
valueMeta: fhirtypes.MetaType = Field(
None,
alias="valueMeta",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueMoney: fhirtypes.MoneyType = Field(
None,
alias="valueMoney",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueOid: fhirtypes.Oid = Field(
None,
alias="valueOid",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueOid__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueOid", title="Extension field for ``valueOid``."
)
valueParameterDefinition: fhirtypes.ParameterDefinitionType = Field(
None,
alias="valueParameterDefinition",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valuePeriod: fhirtypes.PeriodType = Field(
None,
alias="valuePeriod",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valuePositiveInt: fhirtypes.PositiveInt = Field(
None,
alias="valuePositiveInt",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valuePositiveInt__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valuePositiveInt",
title="Extension field for ``valuePositiveInt``.",
)
valueQuantity: fhirtypes.QuantityType = Field(
None,
alias="valueQuantity",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueRange: fhirtypes.RangeType = Field(
None,
alias="valueRange",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueRatio: fhirtypes.RatioType = Field(
None,
alias="valueRatio",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueRatioRange: fhirtypes.RatioRangeType = Field(
None,
alias="valueRatioRange",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueReference: fhirtypes.ReferenceType = Field(
None,
alias="valueReference",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueRelatedArtifact: fhirtypes.RelatedArtifactType = Field(
None,
alias="valueRelatedArtifact",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueSampledData: fhirtypes.SampledDataType = Field(
None,
alias="valueSampledData",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueSignature: fhirtypes.SignatureType = Field(
None,
alias="valueSignature",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueString: fhirtypes.String = Field(
None,
alias="valueString",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueString", title="Extension field for ``valueString``."
)
valueTime: fhirtypes.Time = Field(
None,
alias="valueTime",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueTime", title="Extension field for ``valueTime``."
)
valueTiming: fhirtypes.TimingType = Field(
None,
alias="valueTiming",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueTriggerDefinition: fhirtypes.TriggerDefinitionType = Field(
None,
alias="valueTriggerDefinition",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueUnsignedInt: fhirtypes.UnsignedInt = Field(
None,
alias="valueUnsignedInt",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueUnsignedInt__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_valueUnsignedInt",
title="Extension field for ``valueUnsignedInt``.",
)
valueUri: fhirtypes.Uri = Field(
None,
alias="valueUri",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUri", title="Extension field for ``valueUri``."
)
valueUrl: fhirtypes.Url = Field(
None,
alias="valueUrl",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueUrl__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUrl", title="Extension field for ``valueUrl``."
)
valueUsageContext: fhirtypes.UsageContextType = Field(
None,
alias="valueUsageContext",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueUuid: fhirtypes.Uuid = Field(
None,
alias="valueUuid",
title="If parameter is a data type",
description="Conveys the content if the parameter is a data type.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e value[x]
one_of_many="value",
one_of_many_required=False,
)
valueUuid__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_valueUuid", title="Extension field for ``valueUuid``."
)
    @classmethod
    def elements_sequence(cls):
        """Return all element names of ``ParametersParameter`` in the
        specification-defined sequence order: base element fields first,
        then the ``value[x]`` choices, then ``resource`` and ``part``.
        """
        return [
            "id",
            "extension",
            "modifierExtension",
            "name",
            "valueBase64Binary",
            "valueBoolean",
            "valueCanonical",
            "valueCode",
            "valueDate",
            "valueDateTime",
            "valueDecimal",
            "valueId",
            "valueInstant",
            "valueInteger",
            "valueInteger64",
            "valueMarkdown",
            "valueOid",
            "valuePositiveInt",
            "valueString",
            "valueTime",
            "valueUnsignedInt",
            "valueUri",
            "valueUrl",
            "valueUuid",
            "valueAddress",
            "valueAge",
            "valueAnnotation",
            "valueAttachment",
            "valueCodeableConcept",
            "valueCodeableReference",
            "valueCoding",
            "valueContactPoint",
            "valueCount",
            "valueDistance",
            "valueDuration",
            "valueHumanName",
            "valueIdentifier",
            "valueMoney",
            "valuePeriod",
            "valueQuantity",
            "valueRange",
            "valueRatio",
            "valueRatioRange",
            "valueReference",
            "valueSampledData",
            "valueSignature",
            "valueTiming",
            "valueContactDetail",
            "valueDataRequirement",
            "valueExpression",
            "valueParameterDefinition",
            "valueRelatedArtifact",
            "valueTriggerDefinition",
            "valueUsageContext",
            "valueAvailability",
            "valueExtendedContactDetail",
            "valueDosage",
            "valueMeta",
            "resource",
            "part",
        ]
    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_2167(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        # Pairs of (required primitive field, its companion extension field).
        required_fields = [("name", "name__ext")]
        # Sentinel distinguishing "key absent" from an explicit None value.
        _missing = object()
        def _fallback():
            return ""
        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                # Primitive value supplied -- requirement satisfied.
                continue
            # Primitive value missing/None: acceptable only when the companion
            # extension (e.g. ``_name``) carries at least one extension entry.
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    # Raw dict form (pre-validation payload).
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    # Already a parsed FHIRPrimitiveExtension instance.
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # Unknown representation: coerce it through the extension
                    # field's own validators, collecting any failures.
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                # Neither value nor a populated extension: report the most
                # specific error for "absent" vs "explicitly None".
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
    @root_validator(pre=True, allow_reuse=True)
    def validate_one_of_many_2167(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/formats.html#choice
        A few elements have a choice of more than one data type for their content.
        All such elements have a name that takes the form nnn[x].
        The "nnn" part of the name is constant, and the "[x]" is replaced with
        the title-cased name of the type that is actually used.
        The table view shows each of these names explicitly.

        Elements that have a choice of data type cannot repeat - they must have a
        maximum cardinality of 1. When constructing an instance of an element with a
        choice of types, the authoring system must create a single element with a
        data type chosen from among the list of permitted data types.
        """
        # Maps each choice prefix ("value") to every concrete value[x] field.
        one_of_many_fields = {
            "value": [
                "valueAddress",
                "valueAge",
                "valueAnnotation",
                "valueAttachment",
                "valueAvailability",
                "valueBase64Binary",
                "valueBoolean",
                "valueCanonical",
                "valueCode",
                "valueCodeableConcept",
                "valueCodeableReference",
                "valueCoding",
                "valueContactDetail",
                "valueContactPoint",
                "valueCount",
                "valueDataRequirement",
                "valueDate",
                "valueDateTime",
                "valueDecimal",
                "valueDistance",
                "valueDosage",
                "valueDuration",
                "valueExpression",
                "valueExtendedContactDetail",
                "valueHumanName",
                "valueId",
                "valueIdentifier",
                "valueInstant",
                "valueInteger",
                "valueInteger64",
                "valueMarkdown",
                "valueMeta",
                "valueMoney",
                "valueOid",
                "valueParameterDefinition",
                "valuePeriod",
                "valuePositiveInt",
                "valueQuantity",
                "valueRange",
                "valueRatio",
                "valueRatioRange",
                "valueReference",
                "valueRelatedArtifact",
                "valueSampledData",
                "valueSignature",
                "valueString",
                "valueTime",
                "valueTiming",
                "valueTriggerDefinition",
                "valueUnsignedInt",
                "valueUri",
                "valueUrl",
                "valueUsageContext",
                "valueUuid",
            ]
        }
        for prefix, fields in one_of_many_fields.items():
            # Sanity check: the field metadata agrees with this table.
            assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
            required = (
                cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
                is True
            )
            # Scan all choices; more than one non-None value is an error.
            found = False
            for field in fields:
                if field in values and values[field] is not None:
                    if found is True:
                        raise ValueError(
                            "Any of one field value is expected from "
                            f"this list {fields}, but got multiple!"
                        )
                    else:
                        found = True
            if required is True and found is False:
                raise ValueError(f"Expect any of field value from this list {fields}.")
        return values
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
2332fc6eb75e911cda4ac674a691601b23144c56 | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /N/NumberofSubsequencesThatSatisfytheGivenSumCondition.py | 2b3ac4e3f31a679a7d7875712e7e3752b79dbc87 | [] | no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,195 | py | '''
-Medium-
You are given an array of integers nums and an integer target.
Return the number of non-empty subsequences of nums such that the sum of the minimum and maximum element on it is less or equal to target. Since the answer may be too large, return it modulo 109 + 7.
Example 1:
Input: nums = [3,5,6,7], target = 9
Output: 4
Explanation: There are 4 subsequences that satisfy the condition.
[3] -> Min value + max value <= target (3 + 3 <= 9)
[3,5] -> (3 + 5 <= 9)
[3,5,6] -> (3 + 6 <= 9)
[3,6] -> (3 + 6 <= 9)
Example 2:
Input: nums = [3,3,6,8], target = 10
Output: 6
Explanation: There are 6 subsequences that satisfy the condition. (nums can have repeated numbers).
[3] , [3] , [3,3], [3,6] , [3,6] , [3,3,6]
Example 3:
Input: nums = [2,3,3,4,6,7], target = 12
Output: 61
Explanation: There are 63 non-empty subsequences, two of them do not satisfy the condition ([6,7], [7]).
Number of valid subsequences (63 - 2 = 61).
Constraints:
1 <= nums.length <= 105
1 <= nums[i] <= 106
1 <= target <= 106
'''
from typing import List
import bisect
class Solution:
    """Count non-empty subsequences of nums whose min + max <= target.

    After sorting, fix the minimum element A[i]; let j be the last index with
    A[i] + A[j] <= target.  Every subset of A[i+1..j] may be appended to A[i],
    giving 2**(j-i) valid subsequences with minimum A[i].  Results are
    reported modulo 10**9 + 7.
    """

    def numSubseq(self, nums: List[int], target: int) -> int:
        """O(n log n) variant using binary search with an explicit `lo` bound.

        :param nums: list of positive integers (not mutated).
        :param target: upper bound for min + max of a subsequence.
        :return: count of valid non-empty subsequences, mod 10**9 + 7.
        """
        # sorted() copies: the original `A = nums; A.sort()` mutated the
        # caller's list as a side effect.
        A = sorted(nums)
        n, mod = len(A), 10 ** 9 + 7
        ans = 0
        for i in range(n):
            # Rightmost j >= i with A[j] <= target - A[i].  Passing lo=i
            # avoids the O(n) copy that bisecting on the slice A[i:] cost.
            idx = bisect.bisect_right(A, target - A[i], i) - i - 1
            if idx >= 0:
                # pow(2, idx, mod) keeps intermediates bounded; (1 << idx)
                # built an idx-bit integer before reducing it.
                ans = (ans + pow(2, idx, mod)) % mod
        return ans

    def numSubseq2(self, nums: List[int], target: int) -> int:
        """Equivalent variant bisecting over the whole array (kept from the
        original file as a second implementation)."""
        A = sorted(nums)
        n, mod = len(A), 10 ** 9 + 7
        ans = 0
        for i in range(n):
            # bisect over the full array; subtracting i + 1 yields the same
            # exponent (negative when A[i] itself already exceeds the bound).
            idx = bisect.bisect_right(A, target - A[i]) - i - 1
            if idx >= 0:
                ans = (ans + pow(2, idx, mod)) % mod
        return ans
if __name__ == "__main__":
print(Solution().numSubseq(nums = [3,5,6,7], target = 9))
print(Solution().numSubseq(nums = [3,3,6,8], target = 10))
print(Solution().numSubseq(nums = [2,3,3,4,6,7], target = 12))
print(Solution().numSubseq2(nums = [3,5,6,7], target = 9))
print(Solution().numSubseq2(nums = [3,3,6,8], target = 10))
print(Solution().numSubseq2(nums = [2,3,3,4,6,7], target = 12)) | [
"merlintiger@hotmail.com"
] | merlintiger@hotmail.com |
f3cd108c4ee31b5498859b931cd6bc67e4d4b418 | e91f477713556f14b288b89ecce89754d4bd93f7 | /alpha-clipping/main.py | 8206fd8738006ed2f687e69b36b9dbbd85e3e019 | [
"MIT"
] | permissive | PepSalehi/algorithms | 715603ad16c320c0f1d32c544062b71b11814263 | 1c20f57185e6324aa840ccff98e69764b4213131 | refs/heads/master | 2020-12-28T23:24:39.542742 | 2019-02-01T05:17:56 | 2019-02-01T05:17:56 | 14,173,271 | 0 | 0 | MIT | 2019-02-01T05:17:57 | 2013-11-06T13:27:34 | Python | UTF-8 | Python | false | false | 5,195 | py | #!/usr/bin/env python
"""Example of the alpha clipping algorithm."""
def main():
"""Test some simple examples."""
pl = Point(0.0, 0.0)
pr = Point(10.0, 6.0)
p0 = Point(-3.0, 4.0)
p1 = Point(6.0, -2.0)
p3 = Point(-1.0, -1.0)
p4 = Point(4.0, 4.0)
p5 = Point(1.0, 100.0)
rectangle = Rectangle(pl, pr)
print(alpha_clipping(rectangle, Line(p1, pr)))
print(alpha_clipping(rectangle, Line(p3, pr)))
print(alpha_clipping(rectangle, Line(p3, p4)))
print(alpha_clipping(rectangle, Line(p1, p3)))
print(alpha_clipping(rectangle, Line(p3, p5)))
print(alpha_clipping(rectangle, Line(p0, p1)))
class Point(object):
"""A point identified by (x,y) coordinates."""
def __init__(self, x=0.0, y=0.0):
"""
Constructor for a point.
Parameters
----------
x : float
y : float
"""
assert isinstance(x, float), "x=%r is not a float" % x
assert isinstance(y, float), "y=%r is not a float" % y
self.x = x
self.y = y
def __str__(self):
return "P(%0.2f, %0.2f)" % (self.x, self.y)
def __repr__(self):
return str(self)
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return Point(self.x - other.x, self.y - other.y)
def __rmul__(self, other): # :-/
assert isinstance(other, float), "other=%r is not a float" % other
return Point(other * self.x, other * self.y)
class Rectangle(object):
"""A rectangle identified by two points."""
def __init__(self, p1, p2):
"""
Constructor for a rectangle.
Parameters
----------
p1 : Point
p2 : Point
"""
assert isinstance(p1, Point), "p1=%r is not a point" % p1
assert isinstance(p2, Point), "p2=%r is not a point" % p2
self.p1 = p1
self.p2 = p2
self.x_min = min(p1.x, p2.x)
self.y_min = min(p1.y, p2.x)
self.x_max = max(p1.x, p2.x)
self.y_max = max(p1.y, p2.x)
def get_outcode(self, p):
"""
Get the outcode for a point p.
The values are (left, right, bottom, top).
Parameters
----------
p : Point
Returns
-------
list of 4 bools
"""
assert isinstance(p, Point), "p=%r is not a point" % p
outcode = [p.x < self.x_min,
p.x > self.x_max,
p.y < self.y_min,
p.y > self.y_max]
return outcode
def get_wec(self, e, p):
"""
Get the window edge coordiantes (WEC) of a point p according to edge e.
Parameters
----------
e : 0, 1, 2, 3
p : Point
Returns
-------
float
"""
assert e in [0, 1, 2, 3], "e=%s is not in [0, 1, 2, 3]" % str(e)
assert isinstance(p, Point), "p=%r is not a point" % p
if e == 0: # left
return p.x - self.x_min
elif e == 1: # right
return self.x_max - p.x
elif e == 2: # bottom
return p.y - self.y_min
elif e == 3: # top
return self.y_max - p.y
class Line(object):
"""A line identified by two points."""
def __init__(self, p1, p2):
"""
Constructor for a line.
Parameters
----------
p1 : Point
p2 : Point
"""
assert isinstance(p1, Point), "p1=%r is not a point" % p1
assert isinstance(p2, Point), "p2=%r is not a point" % p2
self.p1 = p1
self.p2 = p2
def __str__(self):
return "[%s, %s]" % (str(self.p1), str(self.p2))
def __repr__(self):
return str(self)
def alpha_clipping(rectangle, line):
"""
Apply alpha-clipping of `line` according to `rectangle`.
Parameters
----------
rectangle : Rectangle
line : Line
Returns
-------
`None` or Line within rectangle
"""
a_min = 0.0
a_max = 1.0
outcode_p1 = rectangle.get_outcode(line.p1)
outcode_p2 = rectangle.get_outcode(line.p2)
for e in range(4):
if outcode_p1[e] and outcode_p2[e]:
return None # trivial reject
if outcode_p1[e] or outcode_p2[e]:
# line intersects line
wec_p1 = rectangle.get_wec(e, line.p1)
wec_p2 = rectangle.get_wec(e, line.p2)
a_s = wec_p1 / (wec_p1 - wec_p2)
if outcode_p1[e]: # P1 is outside of the rectangle
a_min = max(a_min, a_s)
else:
a_max = min(a_max, a_s)
if a_min > a_max:
return None # non-trivial reject
else:
# Now we have a line which is parametrized like this:
# P1 + a * (P2 - P1) with a in [a_min, a_max]
# We want a line which is parametrized like this:
# P1' + a * (P2' - P1') with a in [0, 1]
print("a_min=%0.2f" % a_min)
print("a_max=%0.2f" % a_max)
p1s = line.p1 + a_min * (line.p2 - line.p1)
p2s = line.p1 + a_max * (line.p2 - line.p1)
return Line(p1s, p2s)
if __name__ == '__main__':
main()
| [
"info@martin-thoma.de"
] | info@martin-thoma.de |
bda61239f575bbe29c60da4f3ed441365a6650ae | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/ep/ipagingp.py | 24fe0ca702d011aa284b1fef708a92a52ea68b21 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 7,228 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IpAgingP(Mo):
    """
    Managed object for the endpoint IP aging policy (concrete APIC class
    ``epIpAgingP``, label "IP Aging Policy").  Instances are named
    ``ipAgingP-%(name)s`` and live under ``cobra.model.infra.Infra``.  The
    only configurable functional knob is ``adminSt`` (enabled/disabled); the
    remaining properties are standard MO bookkeeping.  All metadata below is
    generated from the controller model ("Mo doc not defined in techpub").
    """
    # --- Class identity, access masks and capability flags (generated) ---
    meta = ClassMeta("cobra.model.ep.IpAgingP")
    meta.moClassName = "epIpAgingP"
    meta.rnFormat = "ipAgingP-%(name)s"
    meta.category = MoCategory.REGULAR
    meta.label = "IP Aging Policy"
    meta.writeAccessMask = 0x101000000001
    meta.readAccessMask = 0x101000000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = False
    meta.isContextRoot = False
    # --- Containment: allowed child classes and their RN prefixes ---
    meta.childClasses.add("cobra.model.tag.Tag")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
    meta.childClasses.add("cobra.model.tag.Annotation")
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    # --- Parent and superclass chain in the model hierarchy ---
    meta.parentClasses.add("cobra.model.infra.Infra")
    meta.superClasses.add("cobra.model.fabric.ProtoPol")
    meta.superClasses.add("cobra.model.fabric.ProtoInstPol")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Def")
    meta.superClasses.add("cobra.model.fabric.L2InstPol")
    meta.rnPrefixes = [
        ('ipAgingP-', True),
    ]
    # --- Property metadata -----------------------------------------------
    # adminSt: the functional switch of this policy; defaults to "disabled".
    prop = PropMeta("str", "adminSt", "adminSt", 27969, PropCategory.REGULAR)
    prop.label = "Admin State"
    prop.isConfig = True
    prop.isAdmin = True
    prop.defaultValue = 2
    prop.defaultValueStr = "disabled"
    prop._addConstant("disabled", "disabled", 2)
    prop._addConstant("enabled", "enabled", 1)
    meta.props.add("adminSt", prop)
    # annotation: free-form orchestrator tag, e.g. "orchestrator:value".
    prop = PropMeta("str", "annotation", "annotation", 38574, PropCategory.REGULAR)
    prop.label = "Annotation. Suggested format orchestrator:value"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("annotation", prop)
    # childAction: implicit bookkeeping property used by the controller.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    # descr: user-supplied description.
    prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    # dn: full distinguished name, assigned by the system at creation.
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    # extMngdBy: which external manager (if any) owns this object.
    prop = PropMeta("str", "extMngdBy", "extMngdBy", 40713, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "undefined"
    prop._addConstant("msc", "msc", 1)
    prop._addConstant("undefined", "undefined", 0)
    meta.props.add("extMngdBy", prop)
    # lcOwn: lifecycle owner of the object.
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    # modTs: last-modification timestamp ("never" until first change).
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    # name: the naming property; appears in the RN (see meta.namingProps).
    prop = PropMeta("str", "name", "name", 27970, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    # nameAlias: display alias for the name.
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    # ownerKey / ownerTag: opaque owner bookkeeping strings.
    prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerKey", prop)
    prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("ownerTag", prop)
    # rn: relative name, assigned by the system at creation.
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    # status: create/modify/delete state flags.
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    # uid: id of the user that created/owns the object.
    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)
    # "name" is the single naming property used to build the RN.
    meta.namingProps.append(getattr(meta.props, "name"))
    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Policy"
    def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
        # The naming value list must match meta.namingProps ("name" only).
        namingVals = [name]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
f4715eb58fdf816dd11aecfe15e38cb0f67c343e | f2ed44ff6a8e4f163680f53bd34845e3cac3d91c | /summarize/data/dataset_readers/sds/abstractive.py | 017d53831a96c5b3503813c07cadd9b1ba44f658 | [
"Apache-2.0"
] | permissive | CogComp/summary-cloze | 1a2b76ba2e19a8ca0a98e1b95e036dc1dfba17ad | b38e3e8c7755903477fd92a4cff27125cbf5553d | refs/heads/master | 2020-09-01T17:09:10.312107 | 2019-11-05T20:43:47 | 2019-11-05T20:43:47 | 219,012,685 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,880 | py | from allennlp.common.file_utils import cached_path
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import MetadataField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from overrides import overrides
from typing import Dict, Iterable, List, Optional
from summarize.data.io import JsonlReader
from summarize.data.paragraph_tokenizers import ParagraphTokenizer, ParagraphWordTokenizer
@DatasetReader.register('sds-abstractive')
class AbstractiveDatasetReader(DatasetReader):
    """
    Dataset reader for generic single-document abstractive summarization.

    Each JSON-lines record must provide the document and the summary as lists
    of sentence strings under the "document" and "summary" keys.

    Parameters
    ----------
    document_token_indexers: token indexers for the document tokens; defaults
        to a single-id "tokens" indexer.
    summary_token_indexers: token indexers for the summary tokens; defaults to
        the same object as the document indexers.
    document_tokenizer: tokenizer for the document text; defaults to
        ``ParagraphWordTokenizer``.
    summary_tokenizer: tokenizer for the summary text; defaults to the
        document tokenizer.
    max_document_length: if given, the tokenized document is truncated to this
        many tokens.  Truncation runs after tokenization, so any start/end
        tokens count toward the limit and end tokens may be cut off.
    max_summary_length: like ``max_document_length`` but for the summary.
    """
    def __init__(self,
                 document_token_indexers: Optional[Dict[str, TokenIndexer]] = None,
                 summary_token_indexers: Optional[Dict[str, TokenIndexer]] = None,
                 document_tokenizer: Optional[ParagraphTokenizer] = None,
                 summary_tokenizer: Optional[ParagraphTokenizer] = None,
                 max_document_length: Optional[int] = None,
                 max_summary_length: Optional[int] = None,
                 lazy: bool = True) -> None:
        super().__init__(lazy)
        # Defaults cascade: summary settings fall back to the document's,
        # which themselves fall back to the standard components.
        self.document_token_indexers = document_token_indexers or {'tokens': SingleIdTokenIndexer()}
        self.summary_token_indexers = summary_token_indexers or self.document_token_indexers
        self.document_tokenizer = document_tokenizer or ParagraphWordTokenizer()
        self.summary_tokenizer = summary_tokenizer or self.document_tokenizer
        self.max_document_length = max_document_length
        self.max_summary_length = max_summary_length

    @overrides
    def _read(self, file_path: str) -> Iterable[Instance]:
        file_path = cached_path(file_path)
        with JsonlReader(file_path) as reader:
            for example in reader:
                yield self.text_to_instance(example['document'], summary=example['summary'])

    @overrides
    def text_to_instance(self, document: List[str], summary: Optional[List[str]] = None) -> Instance:
        """
        Parameters
        ----------
        document: the list of document sentences.
        summary: the list of summary sentences, if available.
        """
        def truncate(tokens, limit):
            # Truncation happens post-tokenization, so ``limit`` also counts
            # any special tokens the tokenizer added.
            return tokens if limit is None else tokens[:limit]

        fields = {}
        metadata = {'document': document}
        document_tokens = truncate(self.document_tokenizer.tokenize(document),
                                   self.max_document_length)
        fields['document'] = TextField(document_tokens, self.document_token_indexers)
        if summary is not None:
            summary_tokens = truncate(self.summary_tokenizer.tokenize(summary),
                                      self.max_summary_length)
            fields['summary'] = TextField(summary_tokens, self.summary_token_indexers)
            metadata['summary'] = summary
        # Pass the original, untokenized data through as metadata.
        fields['metadata'] = MetadataField(metadata)
        return Instance(fields)
| [
"danfdeutsch@gmail.com"
] | danfdeutsch@gmail.com |
2568d6aa8b661c8dae6a0f85ab197fa18cd31c95 | 7f66c9818b2a22e6dbfa832a6bb4f9f21fbd15da | /semester_2/graph/lab_03/main.py | 5147ef29678cf898ae10c2835d540e0b4f05cb83 | [] | no_license | caprapaul/assignments | cc3992833d4f23f74286c1800ac38dc2d9a874da | 206b049700d8a3e03b52e57960cd44f85c415fe8 | refs/heads/master | 2023-05-24T03:46:42.858147 | 2022-05-03T16:26:58 | 2022-05-03T16:26:58 | 248,552,522 | 0 | 0 | null | 2023-05-09T01:49:04 | 2020-03-19T16:31:37 | C | UTF-8 | Python | false | false | 1,744 | py | import random
from directed_graph import DirectedGraph
from ui import UI
from service import Service
def random_graph(graph, vertex_count, edge_count):
current_edge_count = 0
for i in range(vertex_count):
graph.add_vertex(i)
while current_edge_count < edge_count:
from_vertex = random.randrange(0, vertex_count)
to_vertex = random.randrange(0, vertex_count)
cost = random.randrange(-10, 10)
if graph.get_edge(from_vertex, to_vertex) is None:
graph.add_edge(from_vertex, to_vertex, cost)
current_edge_count += 1
def load_from_file(graph, file_name: str):
with open(file_name, 'r') as data_file:
first_line = data_file.readline()
(vertex_count, edge_count) = first_line.split(' ')
vertex_count = int(vertex_count)
edge_count = int(edge_count)
for i in range(vertex_count):
graph.add_vertex(i)
for line in data_file:
(edge_start, edge_end, edge_cost) = line.split(' ')
edge_start = int(edge_start)
edge_end = int(edge_end)
edge_cost = int(edge_cost)
graph.add_edge(edge_start, edge_end, edge_cost)
def save_to_file(graph: DirectedGraph, file_name: str):
with open(file_name, 'w') as data_file:
output = f"{graph.vertices_count} {graph.edges_count}\n"
for edge in graph.parse_edges():
output += f"{edge.start} {edge.end} {edge.cost}\n"
data_file.write(output)
def run():
graph = DirectedGraph()
#random_graph(graph, 100, 10)
load_from_file(graph, "graph5.txt")
service = Service(graph)
ui = UI(graph, service)
ui.run()
save_to_file(graph, "graph_out.txt")
run()
| [
"c.paulica@gmail.com"
] | c.paulica@gmail.com |
c9c8afe833e1dc94693f45f0a75d7f2677ea1bf0 | aa73e301f658b45a9674df4b619b288945dd0669 | /branches/sal4_new_test_framework/examples/lts2graph.py | a0436c68a964f4d2e7727d7f517de6f605cb99bb | [
"MIT"
] | permissive | svn2github/python-graph2 | e1c37f77cc0a27ac9099208876c63693bffbc929 | f19039d7f3fc1f04977c3f1d1d6128e8545ebef1 | refs/heads/master | 2020-04-30T04:11:38.475209 | 2013-04-19T01:06:14 | 2013-04-19T01:06:14 | 9,714,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,623 | py | #!/usr/bin/env python
# Copyright (c) Pedro Matiello <pmatiello@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
This small application will build and draw a graph for a given finite definite automaton described
as a labelled transition system.
This is a very naive, probably useless, possibly incorrect, barely tested implementation. No
validation is ever performed. Take care or it will burn your house and kill your cat.
"""
# Module metadata
__authors__ = "Pedro Matiello"
__license__ = "MIT"
# Imports
import sys
sys.path.append('..')
import pygraph
sys.path.append('/usr/lib/graphviz/python/')
sys.path.append('/usr/lib64/graphviz/python/')
import gv
def load_automaton(filename):
    """
    Build a directed graph from a finite automaton described as a labelled
    transition system.

    @type  filename: string
    @param filename: Name of the file containing the LTS-described automaton.

    @rtype:  (graph, list)
    @return: The automaton's graph and the list of final state names.
    """
    gr = pygraph.digraph()
    final = []
    infile = open(filename, 'r')
    for raw_line in infile:
        tokens = raw_line.replace("\n", '').split(' ')
        datatype, data = tokens[0], tokens[1:]
        if datatype == 'Q':
            # States: one node per state name.
            for state in data:
                gr.add_node(state)
        elif datatype == 'F':
            # Final states, collected for double-circle styling below.
            final = final + data
        elif datatype == 's':
            # Initial state: a point-shaped pseudo-node '.' with an arrow
            # into the start state.
            gr.add_node('.', attrs=[('shape', 'point')])
            gr.add_edge('.', data[0])
        elif datatype == 't':
            # Transitions: parallel edges are merged into one edge whose
            # label accumulates the symbols.
            source, destination = data[1], data[2]
            if gr.has_edge(source, destination):
                gr.set_edge_label(source, destination,
                                  gr.edge_label(source, destination) + ', ' + data[0])
            else:
                gr.add_edge(source, destination, label=data[0])
        # 'A' (alphabet) lines carry no graph information and are ignored.
    for node in gr:
        if node == '.':
            continue
        shape = 'doublecircle' if node in final else 'circle'
        gr.add_node_attribute(node, ('shape', shape))
    return gr, final
# Main
try:
    filename = sys.argv[1]
    # Build the graph from the LTS description and serialize it to DOT.
    gr, final = load_automaton(sys.argv[1])
    dot = gr.write(fmt='dot')
except IndexError:
    # No filename argument was supplied on the command line.
    print "Syntax: %s filename" % sys.argv[0]
    sys.exit(1)
except IOError:
    # The input file could not be opened.
    print "Can't open file %s" % filename
    sys.exit(2)

# Print graph as PNG image
# Render the DOT description with graphviz's 'circo' circular layout engine
# and write "<filename>.png" next to the input file.
gvv = gv.readstring(dot)
gv.layout(gvv,'circo')
gv.render(gvv,'png',filename + '.png')
| [
"salimfadhley@70df0079-b534-0410-988b-a5721c0f2d16"
] | salimfadhley@70df0079-b534-0410-988b-a5721c0f2d16 |
037de8db65e09ddb945a0032a8f52a6e11056bfc | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOpenBpaasContractQueryModel.py | aafa6e6f87c71ca0f87f9405737db7a130bcbd87 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,452 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenBpaasContractQueryModel(object):
    """Request model for the alipay.open.bpaas.contract.query API: carries the
    BPaaS application id and the target service id."""

    def __init__(self):
        self._bpaas_app_id = None
        self._service_id = None

    @property
    def bpaas_app_id(self):
        return self._bpaas_app_id

    @bpaas_app_id.setter
    def bpaas_app_id(self, value):
        self._bpaas_app_id = value

    @property
    def service_id(self):
        return self._service_id

    @service_id.setter
    def service_id(self, value):
        self._service_id = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping unset (falsy) fields and
        delegating to nested values that expose ``to_alipay_dict``."""
        params = dict()
        for field_name in ('bpaas_app_id', 'service_id'):
            value = getattr(self, field_name)
            if not value:
                continue
            converter = getattr(value, 'to_alipay_dict', None)
            params[field_name] = converter() if converter is not None else value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        model = AlipayOpenBpaasContractQueryModel()
        for field_name in ('bpaas_app_id', 'service_id'):
            if field_name in d:
                setattr(model, field_name, d[field_name])
        return model
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
28b16eba0d922a9a936d56ec71c8ab5c31fcb179 | 0515fdb187aeea949d7d9d7b6b5fc167a1a463f9 | /manage.py | 4a04ca27bb245df22947e878f36e298c3065a70e | [] | no_license | alimahdiyar/django-sql | 692541808cb3b768aa6836cb78eac60389e959aa | 59e19bbd9d40a530933e8c692c749ea20c2f19a0 | refs/heads/master | 2021-06-25T01:21:39.965678 | 2019-12-11T09:26:15 | 2019-12-11T09:26:15 | 227,322,014 | 0 | 1 | null | 2021-06-10T22:23:11 | 2019-12-11T09:08:10 | Python | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django command-line entry point: point at the project settings module
    and delegate to Django's management command dispatcher."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangosql.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Django itself is missing (or the environment is misconfigured);
        # re-raise with a actionable hint while chaining the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Dispatch to Django only when executed as a script, not on import.
    main()
| [
"alimahdiyar77@gmail.com"
] | alimahdiyar77@gmail.com |
c4e8e69cf70a352f0113b54e9d57008ae864bf9b | 1d892928c70ee9ddf66f2a37a8e083d2632c6e38 | /smoketests/__init__.py | dbc926f1e38eba7021e4aac46cf302bf4a47fb84 | [
"Apache-2.0"
] | permissive | usc-isi/essex-baremetal-support | 74196c3f1332ee3cdeba9c263faff0ac0567d3cf | a77daf8ef56cf41e38de36621eda25ed3f180156 | refs/heads/master | 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 | Apache-2.0 | 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null | UTF-8 | Python | false | false | 1,262 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`smoketests` -- Nova Integration "Smoke" Tests
=====================================================
.. automodule:: smoketests
:platform: Unix
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
| [
"dkang@isi.edu"
] | dkang@isi.edu |
53a41e841c6260418bd1e7ae9a20c99f0b338dbc | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/578164_Find_line_text_another/recipe-578164.py | fb286a488650b18fb2f8b998295941577af9201d | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 975 | py | import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file1", help="First file whose lines you want to check")
parser.add_argument("file2", help="Second file, in which you want to search for lines from first file")
args = parser.parse_args()
file1 = open(args.file1)
file2 = open(args.file2)
print "Comparing:"
print args.file1
print "and"
print args.file2
print ""
print "Attempting to find lines in *file1* that are missing in *file2*"
print ""
file1array = file1.readlines()
file2a = file2.readlines()
lengthfile1array = len(file1array)
j=0;
for file1item in file1array:
j += 1
sys.stdout.write("Checking line#: %d/" %(j))
sys.stdout.write("%d \r" %(lengthfile1array))
i=0;
for file2item in file2a:
if file1item.rstrip() == file2item.rstrip():
i += 1
break
else:
i += 1
if i == len(file2a):
print "MISSING LINE FOUND at Line# " + str(j)
| [
"betty@qburst.com"
] | betty@qburst.com |
23dedd75381b9f1fe064efe634f7f8771b9ed2c0 | 565ae8473c545c43341f5511b9633e97f0e4da8b | /course2_python_fundamentals/10-Exams/1-MID-EXAM/PREPARATION/other-exams-for-exam-preparation/Mid_Exam_10_dec_2019/_2_Archery_Tournament.py | c801675c69580bc08321f897a5c16914079ec32a | [] | no_license | andriiburka/Web-Development-with-Python | 3934c1a3945bd983ab39d38b97f1af16fe784207 | b6927653a2c6a9cc10a8768395233e347624c49a | refs/heads/master | 2022-11-21T21:42:04.898254 | 2020-07-29T22:59:56 | 2020-07-29T22:59:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | integers = list(map(int, input().split('|')))
second_integers = []
while True:
judge_command = list(map(str, input().split('@')))
if judge_command[0] == 'Game over':
break
else:
index = int(judge_command[1])
length = int(judge_command[2])
if judge_command[0] == 'Shoot Left':
action = index - length
a = integers.pop(action)
integers.insert(action + 1, abs(a - 5))
elif judge_command[0] == 'Shoot Right'
if index + 1 == length:
action = index + length
for i in range(index + 1):
if integers[i] == length:
index = 0 + length
b = integers.pop(index - 1)
integers.insert(index, index - 5)
print(integers)
| [
"andriiburka@gmail.com"
] | andriiburka@gmail.com |
7c5d43f43b42b60f2b4114b30bbe086c274ce47d | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/client/messenger/proto/shared_errors.py | 967a966ccf6f0da8b65759b6db45d2cedde60407 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,826 | py | # 2015.11.10 21:30:27 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/messenger/proto/shared_errors.py
import BigWorld
from gui.Scaleform.locale.MESSENGER import MESSENGER as I18N_MESSENGER
from helpers import i18n
from messenger.m_constants import CLIENT_ERROR_NAMES, CLIENT_ACTION_NAMES, CLIENT_ERROR_ID
from messenger.proto.interfaces import IChatError
class I18nErrorID(object):
    """Wraps a client error identifier and resolves its symbolic name and
    i18n lookup key."""
    __slots__ = ('errorID',)

    def __init__(self, errorID):
        super(I18nErrorID, self).__init__()
        self.errorID = errorID

    def __repr__(self):
        return '{0}'.format(self.getName())

    def getName(self):
        # Unknown IDs fall back to a generated placeholder name.
        if self.errorID not in CLIENT_ERROR_NAMES:
            return 'CLIENT_ERROR_{0}'.format(self.errorID)
        return CLIENT_ERROR_NAMES[self.errorID]

    def getI18nKey(self):
        return I18N_MESSENGER.client_error_shared(self.getName())
class I18nActionID(object):
__slots__ = ('actionID',)
def __init__(self, actionID):
super(I18nActionID, self).__init__()
self.actionID = actionID
def __repr__(self):
return '{0}'.format(self.getName())
def getName(self):
if self.actionID in CLIENT_ACTION_NAMES:
actionName = CLIENT_ACTION_NAMES[self.actionID]
else:
actionName = 'CLIENT_ACTION_{0}'.format(self.actionID)
return actionName
def getI18nName(self):
name = self.getName()
key = I18N_MESSENGER.client_action(name)
if key:
name = i18n.makeString(key)
return name
class ClientError(IChatError):
__slots__ = ('_error', '_kwargs')
def __init__(self, errorID, **kwargs):
self._error = self.createError(errorID)
self._kwargs = kwargs
def __repr__(self):
return '{0}(error={1})'.format(self.__class__.__name__, self._error)
def createError(self, errorID):
return I18nErrorID(errorID)
def getErrorID(self):
return self._error.errorID
def getErrorName(self):
return self._error.getName()
def getMessage(self):
key = self._error.getI18nKey()
if key:
result = i18n.makeString(key, **self._kwargs)
else:
result = self._error.getName()
if self._kwargs:
result = '{0}/{1}'.format(result, self._kwargs)
return result
class ClientActionError(ClientError):
__slots__ = ('_action',)
def __init__(self, actionID, errorID = None, **kwargs):
super(ClientActionError, self).__init__((errorID or CLIENT_ERROR_ID.GENERIC), **kwargs)
self._action = self.createAction(actionID)
def __repr__(self):
return '{0}(action={1}, error={2})'.format(self.__class__.__name__, self._action, self._error)
def createAction(self, actionID):
return I18nActionID(actionID)
def getActionID(self):
return self._action.actionID
def getMessage(self):
if 'actionName' not in self._kwargs:
self._kwargs['actionName'] = self._action.getI18nName()
return super(ClientActionError, self).getMessage()
class ChatCoolDownError(ClientActionError):
def __init__(self, actionID, coolDown = None):
if coolDown:
kwargs = {'floatArg1': coolDown}
else:
kwargs = {}
super(ChatCoolDownError, self).__init__(actionID, CLIENT_ERROR_ID.COOLDOWN, **kwargs)
def getMessage(self):
actionName = self._action.getI18nName()
if self._kwargs:
msg = i18n.makeString(I18N_MESSENGER.CLIENT_ERROR_ACTION_IN_COOLDOWN, actionName=actionName, **self._kwargs)
else:
msg = i18n.makeString(I18N_MESSENGER.CLIENT_ERROR_ACTION_IN_COOLDOWN_WO_PERIOD, actionName=actionName)
return msg
class ChatBanError(IChatError):
__slots__ = ('_endTime', '_reason')
def __init__(self, endTime, reason):
super(ChatBanError, self).__init__()
self._endTime = endTime
self._reason = reason
def getTitle(self):
return i18n.makeString(I18N_MESSENGER.SERVER_ERRORS_CHATBANNED_TITLE)
def getMessage(self):
if self._endTime:
banEndTime = BigWorld.wg_getLongDateFormat(self._endTime) + ' ' + BigWorld.wg_getShortTimeFormat(self._endTime)
msg = i18n.makeString('#chat:errors/chatbanned', banEndTime=banEndTime, banReason=self._reason)
else:
msg = i18n.makeString('#chat:errors/chatbannedpermanent', banReason=self._reason)
return msg
def isModal(self):
return True
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\proto\shared_errors.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:30:28 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
b14a6a992a76e45e9bff3f37e5aee488de432395 | 66b504cac41d9e02ef605613ef86911e647b7584 | /mergify_engine/tests/functional/actions/test_request_reviews.py | 5aa49cf05b92501795632c8df178d32ab52849e3 | [
"Apache-2.0"
] | permissive | Nytelife26/mergify-engine | 894217f88b93ed48df6f8700cf955826dad3173a | 98c2119e26021a39c7985baccf4f3e35500e7ab2 | refs/heads/master | 2023-04-04T09:48:06.904066 | 2021-04-08T18:29:40 | 2021-04-09T16:21:04 | 356,945,975 | 0 | 0 | Apache-2.0 | 2021-04-11T18:21:27 | 2021-04-11T18:21:27 | null | UTF-8 | Python | false | false | 6,561 | py | # -*- encoding: utf-8 -*-
#
# Copyright © 2018–2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import pytest
import yaml
from mergify_engine import context
from mergify_engine.actions import request_reviews
from mergify_engine.tests.functional import base
class TestRequestReviewsAction(base.FunctionalTestBase):
async def test_request_reviews_users(self):
rules = {
"pull_request_rules": [
{
"name": "request_reviews",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"request_reviews": {"users": ["mergify-test1"]}},
}
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.run_engine()
pulls = await self.get_pulls(base=self.master_branch_name)
assert 1 == len(pulls)
requests = await self.get_review_requests(pulls[0]["number"])
assert sorted(["mergify-test1"]) == sorted(
user["login"] for user in requests["users"]
)
async def test_request_reviews_teams(self):
team = (await self.get_teams())[0]
await self.add_team_permission(team["slug"], "push")
rules = {
"pull_request_rules": [
{
"name": "request_reviews",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"request_reviews": {"teams": [team["slug"]]}},
}
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.run_engine()
pulls = await self.get_pulls(base=self.master_branch_name)
assert 1 == len(pulls)
requests = await self.get_review_requests(pulls[0]["number"])
assert sorted([team["slug"]]) == sorted(
team["slug"] for team in requests["teams"]
)
@mock.patch.object(
request_reviews.RequestReviewsAction, "GITHUB_MAXIMUM_REVIEW_REQUEST", new=1
)
async def test_request_reviews_already_max(self):
rules = {
"pull_request_rules": [
{
"name": "approve",
"conditions": [f"base={self.master_branch_name}"],
"actions": {"review": {"type": "APPROVE"}},
},
{
"name": "request_reviews",
"conditions": [f"base={self.master_branch_name}"],
"actions": {
"request_reviews": {"users": ["mergify-test1", "mergify-test"]}
},
},
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.run_engine()
pulls = await self.get_pulls(base=self.master_branch_name)
assert 1 == len(pulls)
requests = await self.get_review_requests(pulls[0]["number"])
assert ["mergify-test1"] == [user["login"] for user in requests["users"]]
ctxt = await context.Context.create(self.repository_ctxt, p, [])
checks = await ctxt.pull_engine_check_runs
assert len(checks) == 2
for check in checks:
if check["name"] == "Rule: request_reviews (request_reviews)":
assert "neutral" == check["conclusion"]
assert (
"Maximum number of reviews already requested"
== check["output"]["title"]
)
assert (
"The maximum number of 1 reviews has been reached.\n"
"Unable to request reviews for additional users."
== check["output"]["summary"]
)
break
else:
pytest.fail("Unable to find request review check run")
@mock.patch.object(
request_reviews.RequestReviewsAction, "GITHUB_MAXIMUM_REVIEW_REQUEST", new=2
)
async def test_request_reviews_going_above_max(self):
rules = {
"pull_request_rules": [
{
"name": "request_reviews",
"conditions": [
f"base={self.master_branch_name}",
"#review-requested>0",
],
"actions": {
"request_reviews": {
"users": ["mergify-test1", "mergify-test3"],
"teams": ["mergifyio-testing/testing"],
}
},
},
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.run_engine()
pulls = await self.get_pulls(base=self.master_branch_name)
assert 1 == len(pulls)
await self.create_review_request(pulls[0]["number"], ["mergify-test1"])
await self.run_engine()
requests = await self.get_review_requests(pulls[0]["number"])
assert sorted(["mergify-test1", "mergify-test3"]) == sorted(
user["login"] for user in requests["users"]
)
ctxt = await context.Context.create(self.repository_ctxt, p, [])
checks = await ctxt.pull_engine_check_runs
assert len(checks) == 2
for check in checks:
if check["name"] == "Rule: request_reviews (request_reviews)":
assert "neutral" == check["conclusion"]
assert (
"Maximum number of reviews already requested"
== check["output"]["title"]
)
assert (
"The maximum number of 2 reviews has been reached.\n"
"Unable to request reviews for additional users."
== check["output"]["summary"]
)
break
else:
pytest.fail("Unable to find request review check run")
| [
"37929162+mergify[bot]@users.noreply.github.com"
] | 37929162+mergify[bot]@users.noreply.github.com |
194e207bc958493908fac766954695e21e84316a | 7b65a38aca6fc4fb6f39bc5de1e0f352f3ab7e25 | /s9/9.1_espacios.py | b37d38a4121ed466d8d9f17a99e7eb815f8d0ca6 | [] | no_license | camohe90/-mision_tic_G1 | 96b10e4ae14278cf53f0a87638643112e2f81709 | f083c8a0a133b9be1a8d6e8f61cde46cd1aa75e5 | refs/heads/master | 2023-05-26T03:53:25.660868 | 2021-06-09T12:44:48 | 2021-06-09T12:44:48 | 371,374,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | # Usando 4 espacios
def es_par(num):
if num % 2 == 0:
print("par")
return True
else:
return False
| [
"camohe90@gmail.com"
] | camohe90@gmail.com |
dc227e7b0d70151dd193d560dc8cd7da9835d83e | 41d9b92ef2a74a4ba05d27ffbe3beb87884c4ce7 | /math/0x03-probability/normal.py | 5db0c14284738923b0e7d0984afd2984ea74f577 | [] | no_license | JosephK89/holbertonschool-machine_learning | 3f96d886c61d8de99a23e4348fb045b9c930740e | aa5c500f7d8ebeec951f9ab5ec017cae64007c25 | refs/heads/main | 2023-08-14T18:42:53.481354 | 2021-10-10T19:53:40 | 2021-10-10T19:53:40 | 386,248,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | #!/usr/bin/env python3
"""normal distribution module"""
class Normal:
""""Noraml class"""
def __init__(self, data=None, mean=0., stddev=1.):
"""initialization"""
if data is None:
if stddev <= 0:
raise ValueError("stddev must be a positive value")
else:
self.mean = float(mean)
self.stddev = float(stddev)
else:
if type(data) != list:
raise TypeError("data must be a list")
elif len(data) < 2:
raise ValueError("data must contain multiple values")
else:
self.mean = (sum(data) / len(data))
s = 0
for x in range(0, len(data)):
s = s + ((data[x] - self.mean))**2
self.stddev = (s/len(data))**(1/2)
def z_score(self, x):
"""z-score function"""
return ((x - self.mean) / self.stddev)
def x_value(self, z):
"""x-value function"""
return self.stddev * z + self.mean
def pdf(self, x):
"""pdf function"""
return (2.7182818285**((-1/2) * (((
x - self.mean) / self.stddev)**2))) * (
1 / (self.stddev * (2 * 3.1415926536) ** (1/2)))
def cdf(self, x):
"""cdf function"""
num = (x - self.mean) / (self.stddev * (2**(1/2)))
erf = (2 / (3.1415926536**(1/2))) * (num - (num**3)/3 + (
num**5)/10 - (num**7)/42 + (num**9)/216)
return (1 / 2) * (1 + erf)
| [
"josephkamel262@gmail.com"
] | josephkamel262@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.